From 36d74db417e503a5723b9ae8ae3328b276127ec7 Mon Sep 17 00:00:00 2001 From: Seda Gundogdu Date: Mon, 29 Apr 2024 16:27:03 +0300 Subject: [PATCH 1/2] Versions updated in requirements.txt file --- packaging_automation/requirements.txt | 630 +++++++++++++------------- 1 file changed, 311 insertions(+), 319 deletions(-) diff --git a/packaging_automation/requirements.txt b/packaging_automation/requirements.txt index 9341ca77..c7804f1e 100644 --- a/packaging_automation/requirements.txt +++ b/packaging_automation/requirements.txt @@ -1,319 +1,311 @@ -# -# This file is autogenerated by pip-compile with Python 3.8 -# by the following command: -# -# pip-compile --output-file=tools/packaging_automation/requirements.txt tools/packaging_automation/requirements.in -# -anyio==4.0.0 - # via httpcore -astroid==2.15.6 - # via - # pylint - # pylint-celery - # pylint-flask - # requirements-detector -attrs==23.1.0 - # via -r tools/packaging_automation/requirements.in -bandit==1.7.5 - # via prospector -black==23.9.1 - # via -r tools/packaging_automation/requirements.in -build==1.0.3 - # via pyroma -certifi==2023.7.22 - # via - # httpcore - # httpx - # requests -cffi==1.15.1 - # via - # cryptography - # pynacl -chardet==5.2.0 - # via mbstrdecoder -charset-normalizer==3.2.0 - # via requests -click==8.1.7 - # via black -cryptography==41.0.3 - # via pyjwt -dataproperty==1.0.1 - # via - # pytablewriter - # tabledata -deprecated==1.2.14 - # via pygithub -dill==0.3.7 - # via pylint -docker==6.1.3 - # via -r tools/packaging_automation/requirements.in -docutils==0.20.1 - # via pyroma -dodgy==0.2.1 - # via prospector -dominate==2.8.0 - # via pytablewriter -exceptiongroup==1.1.3 - # via - # anyio - # pytest -flake8==5.0.4 - # via - # flake8-polyfill - # prospector -flake8-polyfill==1.0.2 - # via pep8-naming -gitdb==4.0.10 - # via gitpython -gitpython==3.1.36 - # via - # -r tools/packaging_automation/requirements.in - # bandit - # prospector -greenlet==2.0.2 - # via sqlalchemy -h11==0.14.0 - # via httpcore -httpcore==0.18.0 - # via httpx -httpx==0.25.0 - # via pypistats -idna==3.4 - # via - # anyio - # httpx - # requests -importlib-metadata==6.8.0 - # via build -iniconfig==2.0.0 - # via pytest -isort==5.12.0 - # via pylint -jinja2==3.1.2 - # via -r tools/packaging_automation/requirements.in -lazy-object-proxy==1.9.0 - # via astroid -markdown-it-py==3.0.0 - # via rich -markupsafe==2.1.3 - # via jinja2 -mbstrdecoder==1.1.3 - # via - # dataproperty - # pytablewriter - # typepy -mccabe==0.7.0 - # via - # flake8 - # prospector - # pylint -mdurl==0.1.2 - # via markdown-it-py -mypy==1.5.1 - # via prospector -mypy-extensions==1.0.0 - # via - # black - # mypy -nodeenv==1.8.0 - # via pyright -packaging==23.1 - # via - # black - # build - # docker - # prospector - # pyroma - # pytest - # requirements-detector - # typepy -parameters-validation==1.2.0 - # via -r tools/packaging_automation/requirements.in -pathlib2==2.3.7.post1 - # via -r tools/packaging_automation/requirements.in -pathspec==0.11.2 - # via black -pathvalidate==3.1.0 - # via pytablewriter -pbr==5.11.1 - # via stevedore -pep8-naming==0.10.0 - # via prospector -platformdirs==3.10.0 - # via - # black - # pylint - # pypistats -pluggy==1.3.0 - # via pytest -prettytable==3.9.0 - # via pypistats -prospector[with_everything]==1.10.2 - # via -r tools/packaging_automation/requirements.in -psycopg2-binary==2.9.7 - # via -r tools/packaging_automation/requirements.in -pycodestyle==2.9.1 - # via - # flake8 - # prospector -pycparser==2.21 - # via cffi -pycurl==7.45.2 - # via -r 
tools/packaging_automation/requirements.in -pydocstyle==6.3.0 - # via prospector -pyflakes==2.5.0 - # via - # flake8 - # prospector -pygithub==1.59.1 - # via -r tools/packaging_automation/requirements.in -pygments==2.16.1 - # via - # pyroma - # rich -pyjwt[crypto]==2.8.0 - # via pygithub -pylint==2.17.5 - # via - # prospector - # pylint-celery - # pylint-django - # pylint-flask - # pylint-plugin-utils -pylint-celery==0.3 - # via prospector -pylint-django==2.5.3 - # via prospector -pylint-flask==0.6 - # via prospector -pylint-plugin-utils==0.7 - # via - # prospector - # pylint-celery - # pylint-django - # pylint-flask -pynacl==1.5.0 - # via pygithub -pypistats==1.5.0 - # via -r tools/packaging_automation/requirements.in -pyproject-hooks==1.0.0 - # via build -pyright==1.1.326 - # via prospector -pyroma==4.2 - # via prospector -pytablewriter[html]==1.0.0 - # via pypistats -pytest==7.4.2 - # via -r tools/packaging_automation/requirements.in -python-dateutil==2.8.2 - # via - # pypistats - # typepy -python-dotenv==1.0.0 - # via -r tools/packaging_automation/requirements.in -python-gnupg==0.5.1 - # via -r tools/packaging_automation/requirements.in -python-slugify==8.0.1 - # via pypistats -python-string-utils==1.0.0 - # via -r tools/packaging_automation/requirements.in -pytz==2023.3.post1 - # via typepy -pyyaml==6.0.1 - # via - # -r tools/packaging_automation/requirements.in - # bandit - # prospector -requests==2.31.0 - # via - # -r tools/packaging_automation/requirements.in - # docker - # pygithub - # pyroma -requirements-detector==1.2.2 - # via prospector -rich==13.5.2 - # via bandit -semver==3.0.1 - # via requirements-detector -setoptconf-tmp==0.3.1 - # via prospector -six==1.16.0 - # via - # pathlib2 - # python-dateutil -smmap==5.0.0 - # via gitdb -sniffio==1.3.0 - # via - # anyio - # httpcore - # httpx -snowballstemmer==2.2.0 - # via pydocstyle -sqlalchemy==2.0.20 - # via -r tools/packaging_automation/requirements.in -stevedore==5.1.0 - # via bandit -tabledata==1.3.1 - # via pytablewriter -tcolorpy==0.1.3 - # via pytablewriter -termcolor==2.3.0 - # via pypistats -text-unidecode==1.3 - # via python-slugify -toml==0.10.2 - # via - # prospector - # requirements-detector - # vulture -tomli==2.0.1 - # via - # black - # build - # mypy - # pylint - # pyproject-hooks - # pytest -tomlkit==0.12.1 - # via pylint -trove-classifiers==2023.8.7 - # via pyroma -typepy[datetime]==1.3.1 - # via - # dataproperty - # pytablewriter - # tabledata -typing-extensions==4.7.1 - # via - # astroid - # black - # mypy - # pylint - # rich - # sqlalchemy -urllib3==2.0.4 - # via - # -r tools/packaging_automation/requirements.in - # docker - # requests -vulture==2.9.1 - # via prospector -wcwidth==0.2.6 - # via prettytable -websocket-client==1.6.3 - # via docker -wheel==0.41.2 - # via -r tools/packaging_automation/requirements.in -wrapt==1.15.0 - # via - # astroid - # deprecated -zipp==3.16.2 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --output-file=requirements.txt /mnt/c/GitClone/tools/packaging_automation/requirements.in +# +anyio==4.3.0 + # via httpx +astroid==2.15.8 + # via + # pylint + # pylint-celery + # pylint-flask + # requirements-detector +attrs==23.2.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +bandit==1.7.8 + # via prospector +black==24.4.2 + # via -r 
/mnt/c/GitClone/tools/packaging_automation/requirements.in +build==1.2.1 + # via pyroma +certifi==2024.2.2 + # via + # httpcore + # httpx + # requests +cffi==1.16.0 + # via + # cryptography + # pynacl +chardet==5.2.0 + # via mbstrdecoder +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via black +cryptography==42.0.5 + # via pyjwt +dataproperty==1.0.1 + # via + # pytablewriter + # tabledata +deprecated==1.2.14 + # via pygithub +dill==0.3.8 + # via pylint +docker==7.0.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +docutils==0.21.2 + # via pyroma +dodgy==0.2.1 + # via prospector +dominate==2.9.1 + # via pytablewriter +exceptiongroup==1.2.1 + # via + # anyio + # pytest +flake8==5.0.4 + # via + # flake8-polyfill + # prospector +flake8-polyfill==1.0.2 + # via pep8-naming +gitdb==4.0.11 + # via gitpython +gitpython==3.1.43 + # via + # -r /mnt/c/GitClone/tools/packaging_automation/requirements.in + # prospector +greenlet==3.0.3 + # via sqlalchemy +h11==0.14.0 + # via httpcore +httpcore==1.0.5 + # via httpx +httpx==0.27.0 + # via pypistats +idna==3.7 + # via + # anyio + # httpx + # requests +iniconfig==2.0.0 + # via pytest +isort==5.13.2 + # via pylint +jinja2==3.1.3 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +lazy-object-proxy==1.10.0 + # via astroid +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.5 + # via jinja2 +mbstrdecoder==1.1.3 + # via + # dataproperty + # pytablewriter + # typepy +mccabe==0.7.0 + # via + # flake8 + # prospector + # pylint +mdurl==0.1.2 + # via markdown-it-py +mypy==1.10.0 + # via prospector +mypy-extensions==1.0.0 + # via + # black + # mypy +nodeenv==1.8.0 + # via pyright +packaging==24.0 + # via + # black + # build + # docker + # prospector + # pyroma + # pytest + # requirements-detector + # typepy +parameters-validation==1.2.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pathlib2==2.3.7.post1 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pathspec==0.12.1 + # via black +pathvalidate==3.2.0 + # via pytablewriter +pbr==6.0.0 + # via stevedore +pep8-naming==0.10.0 + # via prospector +platformdirs==4.2.1 + # via + # black + # pylint + # pypistats +pluggy==1.5.0 + # via pytest +prettytable==3.10.0 + # via pypistats +prospector[with-everything,with_everything]==1.10.3 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +psycopg2-binary==2.9.9 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pycodestyle==2.9.1 + # via + # flake8 + # prospector +pycparser==2.22 + # via cffi +pycurl==7.45.3 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pydocstyle==6.3.0 + # via prospector +pyflakes==2.5.0 + # via + # flake8 + # prospector +pygithub==2.3.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pygments==2.17.2 + # via + # pyroma + # rich +pyjwt[crypto]==2.8.0 + # via pygithub +pylint==2.17.7 + # via + # prospector + # pylint-celery + # pylint-django + # pylint-flask + # pylint-plugin-utils +pylint-celery==0.3 + # via prospector +pylint-django==2.5.3 + # via prospector +pylint-flask==0.6 + # via prospector +pylint-plugin-utils==0.7 + # via + # prospector + # pylint-celery + # pylint-django + # pylint-flask +pynacl==1.5.0 + # via pygithub +pypistats==1.5.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pyproject-hooks==1.1.0 + # via build +pyright==1.1.360 + # via prospector +pyroma==4.2 + # via prospector +pytablewriter[html]==1.2.0 + # via pypistats 
+pytest==8.2.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +python-dateutil==2.9.0.post0 + # via + # pypistats + # typepy +python-dotenv==1.0.1 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +python-gnupg==0.5.2 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +python-slugify==8.0.4 + # via pypistats +python-string-utils==1.0.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +pytz==2024.1 + # via typepy +pyyaml==6.0.1 + # via + # -r /mnt/c/GitClone/tools/packaging_automation/requirements.in + # bandit + # prospector +requests==2.31.0 + # via + # -r /mnt/c/GitClone/tools/packaging_automation/requirements.in + # docker + # pygithub + # pyroma +requirements-detector==1.2.2 + # via prospector +rich==13.7.1 + # via bandit +semver==3.0.2 + # via requirements-detector +setoptconf-tmp==0.3.1 + # via prospector +six==1.16.0 + # via + # pathlib2 + # python-dateutil +smmap==5.0.1 + # via gitdb +sniffio==1.3.1 + # via + # anyio + # httpx +snowballstemmer==2.2.0 + # via pydocstyle +sqlalchemy==2.0.29 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +stevedore==5.2.0 + # via bandit +tabledata==1.3.3 + # via pytablewriter +tcolorpy==0.1.6 + # via pytablewriter +termcolor==2.4.0 + # via pypistats +text-unidecode==1.3 + # via python-slugify +toml==0.10.2 + # via + # prospector + # requirements-detector +tomli==2.0.1 + # via + # black + # build + # mypy + # pylint + # pytest + # vulture +tomlkit==0.12.4 + # via pylint +trove-classifiers==2024.4.10 + # via pyroma +typepy[datetime]==1.3.2 + # via + # dataproperty + # pytablewriter + # tabledata +typing-extensions==4.11.0 + # via + # anyio + # astroid + # black + # mypy + # pygithub + # sqlalchemy +urllib3==2.2.1 + # via + # -r /mnt/c/GitClone/tools/packaging_automation/requirements.in + # docker + # pygithub + # requests +vulture==2.11 + # via prospector +wcwidth==0.2.13 + # via prettytable +wheel==0.43.0 + # via -r /mnt/c/GitClone/tools/packaging_automation/requirements.in +wrapt==1.16.0 + # via + # astroid + # deprecated + +# The following packages are considered to be unsafe in a requirements file: +# setuptools From 16c71f9a750768610792e2c5e7869ceb2120a587 Mon Sep 17 00:00:00 2001 From: Seda Gundogdu Date: Mon, 6 May 2024 14:28:10 +0300 Subject: [PATCH 2/2] code format fixed --- packaging_automation/.python-version | 2 +- packaging_automation/README.md | 494 +- .../bash/daily-statistics-job.sh | 60 +- packaging_automation/citus_package.py | 1106 ++--- packaging_automation/common_tool_methods.py | 1594 +++--- packaging_automation/common_validations.py | 66 +- packaging_automation/dbconfig.py | 104 +- .../delete_packages_on_packagecloud.py | 138 +- .../docker_statistics_collector.py | 256 +- packaging_automation/get_postgres_versions.py | 24 +- .../github_statistics_collector.py | 408 +- .../homebrew_statistics_collector.py | 132 +- .../package_cloud_statistics_collector.py | 858 ++-- .../packaging_warning_handler.py | 340 +- packaging_automation/prepare_release.py | 1696 +++---- .../publish-into-ms-packages.py | 282 +- packaging_automation/publish_docker.py | 698 +-- packaging_automation/pypi_stats_collector.py | 166 +- packaging_automation/requirements.in | 44 +- .../templates/citus-enterprise-pkgvars.tmpl | 10 +- .../templates/citus-enterprise.spec.tmpl | 704 +-- .../templates/citus-pkgvars.tmpl | 10 +- .../templates/citus.spec.tmpl | 230 +- .../docker/alpine/alpine.tmpl.dockerfile | 116 +- .../docker/latest/docker-compose.tmpl.yml | 68 +- 
.../docker/latest/latest.tmpl.dockerfile | 84 +- .../postgres-14/postgres-14.tmpl.dockerfile | 84 +- .../postgres-15/postgres-15.tmpl.dockerfile | 84 +- .../multi_extension_out_prepare_release.tmpl | 30 +- .../multi_extension_sql_prepare_release.tmpl | 18 +- .../pg-auto-failover-enterprise-pkgvars.tmpl | 12 +- .../pg-auto-failover-enterprise.spec.tmpl | 748 +-- .../templates/pg-auto-failover-pkgvars.tmpl | 12 +- .../templates/pg-auto-failover.spec.tmpl | 170 +- .../templates/pgxn/META.tmpl.json | 108 +- .../templates/pgxn/pkgvars.tmpl | 6 +- packaging_automation/test_citus_package.py | 258 +- packaging_automation/tests/files/citus.spec | 816 ++-- .../tests/files/citus_include_10_2_4.spec | 822 ++-- .../tests/files/debian.changelog.refer | 4310 ++++++++-------- .../tests/files/debian/changelog | 4310 ++++++++-------- .../pg_exclude.yml | 10 +- .../files/get_postgres_versions_tests/pkgvars | 8 +- .../postgres-matrix.yml | 44 +- .../tests/files/gpg/packaging.gpg | 26 +- .../files/gpg/packaging_with_passphrase.gpg | 24 +- .../packaging_warning/packaging_ignore.yml | 30 +- .../packaging_ignore_without_rpm_rules.yml | 24 +- .../sample_warning_build_output_deb.txt | 328 +- ...ple_warning_build_output_deb_only_base.txt | 310 +- .../sample_warning_build_output_rpm.txt | 324 +- ...ample_warning_build_output_rpm_success.txt | 324 +- packaging_automation/tests/files/pkgvars | 10 +- .../tests/files/postgres-matrix.yml | 34 +- .../postgres-matrix-success.yml | 32 +- .../verify/debian_changelog_with_10.2.4.txt | 4322 ++++++++--------- .../files/verify/expected_alpine_10.0.3.txt | 116 +- .../verify/expected_debian_latest_v10.2.4.txt | 12 +- .../files/verify/expected_pkgvars_10.2.4.txt | 8 +- .../verify/rpm_latest_changelog_reference.txt | 2 +- .../tests/test_citus_package.py | 466 +- .../tests/test_citus_package_utils.py | 430 +- .../tests/test_common_tool_methods.py | 718 +-- .../tests/test_docker_statistics_collector.py | 148 +- .../tests/test_github_statistics_collector.py | 176 +- .../test_homebrew_statistics_collector.py | 76 +- ...test_package_cloud_statistics_collector.py | 180 +- .../tests/test_packaging_warning_handler.py | 414 +- .../tests/test_prepare_release.py | 504 +- .../tests/test_publish_docker.py | 256 +- .../tests/test_update_docker.py | 362 +- .../tests/test_update_package_properties.py | 474 +- .../tests/test_update_pgxn.py | 114 +- packaging_automation/tests/test_utils.py | 62 +- packaging_automation/update_docker.py | 494 +- .../update_package_properties.py | 904 ++-- packaging_automation/update_pgxn.py | 158 +- .../upload_to_package_cloud.py | 368 +- packaging_automation/validate_build_output.py | 34 +- .../write_postgres_versions_into_file.py | 22 +- 80 files changed, 16913 insertions(+), 16913 deletions(-) diff --git a/packaging_automation/.python-version b/packaging_automation/.python-version index eee6392d..afdebbf3 100644 --- a/packaging_automation/.python-version +++ b/packaging_automation/.python-version @@ -1 +1 @@ -3.8.16 +3.8.16 diff --git a/packaging_automation/README.md b/packaging_automation/README.md index 40e29df2..f792b464 100644 --- a/packaging_automation/README.md +++ b/packaging_automation/README.md @@ -1,247 +1,247 @@ -## Python Environment Installation - -Before using script, you need to make sure that Python > 3.8 is installed in your system. 
- -### Clone Tools Repository - -git clone https://github.com/citusdata/tools.git - -Enter 'tools' directory - -``` console -cd tools -``` - -### Install Required Python Libraries - -Verify pip installation - -``` console -python -m pip --version -``` - -Output should be like following - -``` console -pip 21.1.2 from /home/vagrant/.local/lib/python3.8/site-packages/pip (python 3.8) -``` - -If you get error, you should first install pip - -``` console -sudo apt install python3-pip -``` - -Install the required libraries to execute the script - -``` console -python -m pip install -r packaging_automation/requirements.txt -``` - -If all the steps above completed successfully , you are ready for script execution - -# **Prepare Release ** - -prepare-release.py script performs the pre-packaging configurations in citus/citus-enterprise projects. - -## Script Usage - -Script can be used for either major release (i.e. third digit of release is '0' e.g. 10.1.0) or patch release (i.e. -third digit of release is other than '0' e.g. 10.0.4). - -### Available flags - -**--gh_token:** Personal access token that is authorized to commit citus/citus-enterprise projects. (Required) - -**--prj_name:** Project to be released. Allowed values 'citus' and 'citus-enterprise (Required) - -**--prj_ver:** Upcoming version to be used for release. should include three level of digits separated by dots, e.g: -10.0.1 -(Required) - -**--main_branch:** Branch to be used as base to be used for configuration changes. There is no need for base scenario. -This flag can be used for testing purposes. If not used, default branch value is used; i.e. for 'citus' 'master, for ' -citus-enterprise' 'enterprise-master' - -**--is_test:** If used, branches would not be pushed remote repository and created release branches would be prefixed -with 'test'. Default value is False - -**--cherry_pick_enabled:** Available only for patch release. If used, --earliest_pr_date flag also should be used.Gets -all PR's with 'backport' label created after earliest_pr_date - -**--earliest_pr_date:** Used with --cherry-pick-enabled flag. Date format is 'Y.m.d' e.g 2012.01.21. PR's merged after -this date would be listed and cherry-picked. - -**--schema_version:** Available only for patch release. If used, schema version in citus.control file would be updated. - -### Example Usage - -#### Major - -``` console -python -m packaging_automation.prepare_release --gh_token --prj_name citus --prj_ver 10.1.0 -``` - -#### Patch - -``` console -python -m packaging_automation.prepare_release --gh_token --prj_name citus-enterprise --prj_ver 10.0.4 --schema_version 10.0-5 -``` - -## Update Package Properties -Update package properties script updates debian and redhat package configuration files. - -## Script Usage - -Script can be used in projects following: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise - -## Available flags -**--gh_token:** Personal access token that is authorized to commit citus/citus-enterprise projects. (Required) - -**--prj_name:** Project to be released. Allowed values 'citus' and 'citus-enterprise (Required) - -**--tag-name:** Tag to be used for release. should include three level of digits separated by dots starting with v, e.g: -v10.0.1 -(Required) - -**--fancy_ver_no:** If not set default is 1 and fancy versioning is disabled. 
If set and greater than 1, fancy is enabled - -**--email:** Email to be printed in changelogs (Required) - -**--name:** Name to be printed in changelogs (Required) - -**--date:**: Date to be printed in changelogs - -**--pipeline:** If set, exec path should also be set and exec path will be used as packaging source. If not set, it is evaluated as false and packaging code will be cloned - -**--exec_path:** If pipeline parameter is used, this parameter should be set. Shows the path of packaging sources - -**--is_test:** If true, the branch created will not be published into remote repository - -### Example Usage - -```console -python -m packaging_automation.update_package_properties --gh_token=${{ secrets.GH_TOKEN }} \ - --prj_name "${PRJ_NAME}" --tag_name ${{ github.event.inputs.tag_name }} \ - --email ${{ github.event.inputs.microsoft_email }} --name ${{ github.event.inputs.name }} --pipeline \ - --exec_path "$(pwd)" -``` - - -## Update Docker - -Update docker script updates the docker and changelog files in docker repository required for new release of docker -images after citus/postgres release - -## Script Usage - -Script can be used for both citus version upgrades and PostgreSQL updates. - -### Available flags - -**--gh_token:** Personal access token that is authorized to commit docker project. (Required) - -**--postgres_version:** Optional value that could be set when new postgres version needs to be set in docker images - -**--prj_ver:** Upcoming version to be used for release. should include three level of digits separated by dots, e.g: -10.0.1 -(Required) - -**--is_test:** If used, branches would not be pushed remote repository and PR would not be created (Optional) - -### Example - -#### Citus Upgrade - -``` console - python -m packaging_automation.update_docker --gh_token --prj_ver 10.0.4 -``` - -#### Citus and PostgreSQL version upgrade - -``` console - python -m packaging_automation.update_docker --gh_token --prj_ver 10.0.4 --postgres-version 14.0 -``` - -## Update Pgxn - -Update pgxn script updates the files related to pgxn in all-pgxn branch in packaging repo. - -## Script Usage - -Script can be used for citus version upgrades. - -### Available flags - -**--gh_token:** Personal access token that is authorized to commit packaging project. (Required) - -**--prj_ver:** Upcoming version to be used for release. should include three level of digits separated by dots, e.g: -10.0.1 -(Required) - -**--is_test:** If used, branches would not be pushed remote repository and PR would not be created (Optional) - -### Example - -``` console - python -m packaging_automation.update_pgxn --gh_token --prj_ver 10.0.4 -``` - -## Upload to package cloud -This script uploads built deb and rpm packages. - -## Script usage -This script uploads all the rpm and deb packages from given directory into package cloud,if current branch equals to main branch . - -### Available flags - -**--platform:** Personal access token that is authorized to commit packaging project. 
(Required) - -**--package_cloud_api_token:** Token required to get authorization from package cloud to upload (Required) - -**--repository_name:** Packagecloud repository name to upload Available repos: "sample","citusdata/enterprise","citusdata/community","citusdata/community-nightlies","citusdata/enterprise-nightlies","citusdata/azure" (Required) - -**--output_file_path:** Directory that contains deb and rpm files (Required) - -**--current_branch:** Current branch that the pipeline is working on (Required) - -**--main_branch:** Main branch that is the script to be executed (Required) - -### Example - -``` console - python -m tools.packaging_automation.upload_to_package_cloud \ - --platform ${{ matrix.platform }} \ - --package_cloud_api_token ${{ secrets.PACKAGE_CLOUD_API_TOKEN }} \ - --repository_name "${PACKAGE_CLOUD_REPO_NAME}" \ - --output_file_path "$(pwd)/packages" \ - --current_branch all-citus \ - --main_branch ${MAIN_BRANCH} -``` - -## Publish docker -This script builds and publishes given docker image type - -## Script Usage -Script executes docker build on given image type and publishes the docker image with related tags - -### Available flags - -**--github_ref:** Github Action parameter denoting tag or branch name depending on trigger type . (Required) - -**--pipeline_trigger_type:** Pipeline trigger type. Available option: push,schedule, workflow_dispatch (Required) - -**--tag_name:** Tag name if trigger type is push and - -**--manual_trigger_type:** Trigger type when executing the script manually. Available options: main,tags,nightly (Required) - -**--image_type:** Image type to be published. Available options: latest,alpine,nightly, postgre12 - -### Example - -``` console - python -m tools.packaging_automation.publish_docker --pipeline_trigger_type "${GITHUB_EVENT_NAME}" \ - --exec_path "$(pwd)" --tag_name ${{ github.event.inputs.tag_name }} \ - --manual_trigger_type ${{ github.event.inputs.trigger_type }} -``` - +## Python Environment Installation + +Before using script, you need to make sure that Python > 3.8 is installed in your system. + +### Clone Tools Repository + +git clone https://github.com/citusdata/tools.git + +Enter 'tools' directory + +``` console +cd tools +``` + +### Install Required Python Libraries + +Verify pip installation + +``` console +python -m pip --version +``` + +Output should be like following + +``` console +pip 21.1.2 from /home/vagrant/.local/lib/python3.8/site-packages/pip (python 3.8) +``` + +If you get error, you should first install pip + +``` console +sudo apt install python3-pip +``` + +Install the required libraries to execute the script + +``` console +python -m pip install -r packaging_automation/requirements.txt +``` + +If all the steps above completed successfully , you are ready for script execution + +# **Prepare Release ** + +prepare-release.py script performs the pre-packaging configurations in citus/citus-enterprise projects. + +## Script Usage + +Script can be used for either major release (i.e. third digit of release is '0' e.g. 10.1.0) or patch release (i.e. +third digit of release is other than '0' e.g. 10.0.4). + +### Available flags + +**--gh_token:** Personal access token that is authorized to commit citus/citus-enterprise projects. (Required) + +**--prj_name:** Project to be released. Allowed values 'citus' and 'citus-enterprise (Required) + +**--prj_ver:** Upcoming version to be used for release. 
should include three level of digits separated by dots, e.g: +10.0.1 +(Required) + +**--main_branch:** Branch to be used as base to be used for configuration changes. There is no need for base scenario. +This flag can be used for testing purposes. If not used, default branch value is used; i.e. for 'citus' 'master, for ' +citus-enterprise' 'enterprise-master' + +**--is_test:** If used, branches would not be pushed remote repository and created release branches would be prefixed +with 'test'. Default value is False + +**--cherry_pick_enabled:** Available only for patch release. If used, --earliest_pr_date flag also should be used.Gets +all PR's with 'backport' label created after earliest_pr_date + +**--earliest_pr_date:** Used with --cherry-pick-enabled flag. Date format is 'Y.m.d' e.g 2012.01.21. PR's merged after +this date would be listed and cherry-picked. + +**--schema_version:** Available only for patch release. If used, schema version in citus.control file would be updated. + +### Example Usage + +#### Major + +``` console +python -m packaging_automation.prepare_release --gh_token --prj_name citus --prj_ver 10.1.0 +``` + +#### Patch + +``` console +python -m packaging_automation.prepare_release --gh_token --prj_name citus-enterprise --prj_ver 10.0.4 --schema_version 10.0-5 +``` + +## Update Package Properties +Update package properties script updates debian and redhat package configuration files. + +## Script Usage + +Script can be used in projects following: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise + +## Available flags +**--gh_token:** Personal access token that is authorized to commit citus/citus-enterprise projects. (Required) + +**--prj_name:** Project to be released. Allowed values 'citus' and 'citus-enterprise (Required) + +**--tag-name:** Tag to be used for release. should include three level of digits separated by dots starting with v, e.g: +v10.0.1 +(Required) + +**--fancy_ver_no:** If not set default is 1 and fancy versioning is disabled. If set and greater than 1, fancy is enabled + +**--email:** Email to be printed in changelogs (Required) + +**--name:** Name to be printed in changelogs (Required) + +**--date:**: Date to be printed in changelogs + +**--pipeline:** If set, exec path should also be set and exec path will be used as packaging source. If not set, it is evaluated as false and packaging code will be cloned + +**--exec_path:** If pipeline parameter is used, this parameter should be set. Shows the path of packaging sources + +**--is_test:** If true, the branch created will not be published into remote repository + +### Example Usage + +```console +python -m packaging_automation.update_package_properties --gh_token=${{ secrets.GH_TOKEN }} \ + --prj_name "${PRJ_NAME}" --tag_name ${{ github.event.inputs.tag_name }} \ + --email ${{ github.event.inputs.microsoft_email }} --name ${{ github.event.inputs.name }} --pipeline \ + --exec_path "$(pwd)" +``` + + +## Update Docker + +Update docker script updates the docker and changelog files in docker repository required for new release of docker +images after citus/postgres release + +## Script Usage + +Script can be used for both citus version upgrades and PostgreSQL updates. + +### Available flags + +**--gh_token:** Personal access token that is authorized to commit docker project. (Required) + +**--postgres_version:** Optional value that could be set when new postgres version needs to be set in docker images + +**--prj_ver:** Upcoming version to be used for release. 
should include three level of digits separated by dots, e.g: +10.0.1 +(Required) + +**--is_test:** If used, branches would not be pushed remote repository and PR would not be created (Optional) + +### Example + +#### Citus Upgrade + +``` console + python -m packaging_automation.update_docker --gh_token --prj_ver 10.0.4 +``` + +#### Citus and PostgreSQL version upgrade + +``` console + python -m packaging_automation.update_docker --gh_token --prj_ver 10.0.4 --postgres-version 14.0 +``` + +## Update Pgxn + +Update pgxn script updates the files related to pgxn in all-pgxn branch in packaging repo. + +## Script Usage + +Script can be used for citus version upgrades. + +### Available flags + +**--gh_token:** Personal access token that is authorized to commit packaging project. (Required) + +**--prj_ver:** Upcoming version to be used for release. should include three level of digits separated by dots, e.g: +10.0.1 +(Required) + +**--is_test:** If used, branches would not be pushed remote repository and PR would not be created (Optional) + +### Example + +``` console + python -m packaging_automation.update_pgxn --gh_token --prj_ver 10.0.4 +``` + +## Upload to package cloud +This script uploads built deb and rpm packages. + +## Script usage +This script uploads all the rpm and deb packages from given directory into package cloud,if current branch equals to main branch . + +### Available flags + +**--platform:** Personal access token that is authorized to commit packaging project. (Required) + +**--package_cloud_api_token:** Token required to get authorization from package cloud to upload (Required) + +**--repository_name:** Packagecloud repository name to upload Available repos: "sample","citusdata/enterprise","citusdata/community","citusdata/community-nightlies","citusdata/enterprise-nightlies","citusdata/azure" (Required) + +**--output_file_path:** Directory that contains deb and rpm files (Required) + +**--current_branch:** Current branch that the pipeline is working on (Required) + +**--main_branch:** Main branch that is the script to be executed (Required) + +### Example + +``` console + python -m tools.packaging_automation.upload_to_package_cloud \ + --platform ${{ matrix.platform }} \ + --package_cloud_api_token ${{ secrets.PACKAGE_CLOUD_API_TOKEN }} \ + --repository_name "${PACKAGE_CLOUD_REPO_NAME}" \ + --output_file_path "$(pwd)/packages" \ + --current_branch all-citus \ + --main_branch ${MAIN_BRANCH} +``` + +## Publish docker +This script builds and publishes given docker image type + +## Script Usage +Script executes docker build on given image type and publishes the docker image with related tags + +### Available flags + +**--github_ref:** Github Action parameter denoting tag or branch name depending on trigger type . (Required) + +**--pipeline_trigger_type:** Pipeline trigger type. Available option: push,schedule, workflow_dispatch (Required) + +**--tag_name:** Tag name if trigger type is push and + +**--manual_trigger_type:** Trigger type when executing the script manually. Available options: main,tags,nightly (Required) + +**--image_type:** Image type to be published. 
Available options: latest,alpine,nightly, postgre12 + +### Example + +``` console + python -m tools.packaging_automation.publish_docker --pipeline_trigger_type "${GITHUB_EVENT_NAME}" \ + --exec_path "$(pwd)" --tag_name ${{ github.event.inputs.tag_name }} \ + --manual_trigger_type ${{ github.event.inputs.trigger_type }} +``` + diff --git a/packaging_automation/bash/daily-statistics-job.sh b/packaging_automation/bash/daily-statistics-job.sh index 40d91d7c..19bed9d6 100755 --- a/packaging_automation/bash/daily-statistics-job.sh +++ b/packaging_automation/bash/daily-statistics-job.sh @@ -1,30 +1,30 @@ -#!/bin/bash -[ -z "${JOB_NAME:-}" ] && echo "JOB_NAME should be non-empty value" && exit 1 -[ -z "${DB_USER_NAME:-}" ] && echo "DB_USER_NAME should be non-empty value" && exit 1 -[ -z "${DB_PASSWORD:-}" ] && echo "DB_PASSWORD should be non-empty value" && exit 1 -[ -z "${DB_HOST_AND_PORT:-}" ] && echo "DB_HOST_AND_PORT should be non-empty value" && exit 1 -[ -z "${DB_NAME:-}" ] && echo "DB_NAME should be non-empty value" && exit 1 - -if [[ ${JOB_NAME} == 'docker_pull_citus' ]]; then - python -m packaging_automation.docker_statistics_collector \ - --repo_name citus \ - --db_user_name "${DB_USER_NAME}" \ - --db_password "${DB_PASSWORD}" \ - --db_host_and_port "${DB_HOST_AND_PORT}" \ - --db_name "${DB_NAME}" -elif [[ ${JOB_NAME} == 'github_clone_citus' ]]; then - [ -z "${GH_TOKEN:-}" ] && echo "GH_TOKEN should be non-empty value" && exit 1 - python -m packaging_automation.github_statistics_collector \ - --repo_name citus \ - --db_user_name "${DB_USER_NAME}" \ - --db_password "${DB_PASSWORD}" \ - --db_host_and_port "${DB_HOST_AND_PORT}" \ - --db_name "${DB_NAME}" \ - --github_token "${GH_TOKEN}" -elif [[ ${JOB_NAME} == 'homebrew_citus' ]]; then - python -m packaging_automation.homebrew_statistics_collector \ - --db_user_name "${DB_USER_NAME}" \ - --db_password "${DB_PASSWORD}" \ - --db_host_and_port "${DB_HOST_AND_PORT}" \ - --db_name "${DB_NAME}" -fi +#!/bin/bash +[ -z "${JOB_NAME:-}" ] && echo "JOB_NAME should be non-empty value" && exit 1 +[ -z "${DB_USER_NAME:-}" ] && echo "DB_USER_NAME should be non-empty value" && exit 1 +[ -z "${DB_PASSWORD:-}" ] && echo "DB_PASSWORD should be non-empty value" && exit 1 +[ -z "${DB_HOST_AND_PORT:-}" ] && echo "DB_HOST_AND_PORT should be non-empty value" && exit 1 +[ -z "${DB_NAME:-}" ] && echo "DB_NAME should be non-empty value" && exit 1 + +if [[ ${JOB_NAME} == 'docker_pull_citus' ]]; then + python -m packaging_automation.docker_statistics_collector \ + --repo_name citus \ + --db_user_name "${DB_USER_NAME}" \ + --db_password "${DB_PASSWORD}" \ + --db_host_and_port "${DB_HOST_AND_PORT}" \ + --db_name "${DB_NAME}" +elif [[ ${JOB_NAME} == 'github_clone_citus' ]]; then + [ -z "${GH_TOKEN:-}" ] && echo "GH_TOKEN should be non-empty value" && exit 1 + python -m packaging_automation.github_statistics_collector \ + --repo_name citus \ + --db_user_name "${DB_USER_NAME}" \ + --db_password "${DB_PASSWORD}" \ + --db_host_and_port "${DB_HOST_AND_PORT}" \ + --db_name "${DB_NAME}" \ + --github_token "${GH_TOKEN}" +elif [[ ${JOB_NAME} == 'homebrew_citus' ]]; then + python -m packaging_automation.homebrew_statistics_collector \ + --db_user_name "${DB_USER_NAME}" \ + --db_password "${DB_PASSWORD}" \ + --db_host_and_port "${DB_HOST_AND_PORT}" \ + --db_name "${DB_NAME}" +fi diff --git a/packaging_automation/citus_package.py b/packaging_automation/citus_package.py index 09b12db1..fa386693 100644 --- a/packaging_automation/citus_package.py +++ 
b/packaging_automation/citus_package.py @@ -1,553 +1,553 @@ -import argparse -import glob -import os -import subprocess -from enum import Enum -from typing import Dict -from typing import List -from typing import Tuple - -import docker -import gnupg -import yaml -from attr import dataclass -from dotenv import dotenv_values -from parameters_validation import non_blank, non_empty, validate_parameters - -from .common_tool_methods import ( - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, - PackageType, - get_gpg_fingerprints_by_name, - get_supported_postgres_nightly_versions, - get_supported_postgres_release_versions, - platform_names, - run_with_output, - supported_platforms, - transform_key_into_base64_str, -) -from .packaging_warning_handler import validate_output - -GPG_KEY_NAME = "packaging@citusdata.com" - -POSTGRES_VERSION_FILE = "supported-postgres" -POSTGRES_MATRIX_FILE_NAME = "postgres-matrix.yml" -POSTGRES_EXCLUDE_FILE_NAME = "pg_exclude.yml" - -docker_image_names = { - "almalinux": "almalinux", - "rockylinux": "almalinux", - "debian": "debian", - "el/9": "almalinux-9", - "ol/9": "almalinux-9", - "el/8": "almalinux-8", - "el": "centos", - "ol": "oraclelinux", - "ubuntu": "ubuntu", - "pgxn": "pgxn", -} - -package_docker_platform_dict = { - "almalinux,9": "almalinux/9", - "almalinux,8": "almalinux/8", - "centos,8": "el/8", - "centos,7": "el/7", - "debian,bookworm": "debian/bookworm", - "debian,bullseye": "debian/bullseye", - "debian,buster": "debian/buster", - "debian,stretch": "debian/stretch", - "oraclelinux,8": "ol/8", - "oraclelinux,7": "ol/7", - "oraclelinux,6": "ol/6", - "ubuntu,focal": "ubuntu/focal", - "ubuntu,bionic": "ubuntu/bionic", - "ubuntu,jammy": "ubuntu/jammy", - "ubuntu,kinetic": "ubuntu/kinetic", - "pgxn": "pgxn", -} - - -def get_package_type_by_docker_image_name(docker_image_name: str) -> PackageType: - return ( - PackageType.deb - if docker_image_name.startswith(("ubuntu", "debian")) - else PackageType.rpm - ) - - -class BuildType(Enum): - release = 1 - nightly = 2 - - -class PostgresVersionDockerImageType(Enum): - multiple = 1 - single = 2 - - -platform_postgres_version_source = { - "el": PostgresVersionDockerImageType.multiple, - "ol": PostgresVersionDockerImageType.multiple, - "almalinux": PostgresVersionDockerImageType.multiple, - "debian": PostgresVersionDockerImageType.single, - "ubuntu": PostgresVersionDockerImageType.single, - "pgxn": PostgresVersionDockerImageType.single, -} - -PKGVARS_FILE = "pkgvars" -SINGLE_DOCKER_POSTGRES_PREFIX = "all" -PACKAGES_DIR_NAME = "packages" - - -def decode_os_and_release(platform_name: str) -> Tuple[str, str]: - parts = platform_name.split("/") - - if len(parts) == 0 or len(parts) > 2 or (len(parts) == 1 and parts[0] != "pgxn"): - raise ValueError( - "Platforms should have two parts divided by '/' or should be 'pgxn' " - ) - if len(parts) == 1 and parts[0] == "pgxn": - os_name = "pgxn" - os_release = "" - else: - os_name = parts[0] - os_release = parts[1] - if os_name not in supported_platforms: - raise ValueError( - f"{os_name} is not among supported operating systems. Supported operating systems are as below:\n " - f"{','.join(supported_platforms.keys())}" - ) - if os_release not in supported_platforms[os_name]: - raise ValueError( - f"{os_release} is not among supported releases for {os_name}." 
- f"Supported releases are as below:\n {','.join(supported_platforms[os_name])}" - ) - return os_name, os_release - - -def is_docker_running() -> bool: - try: - docker_client = docker.from_env() - docker_client.ping() - return True - ## Exception type is not defined in API so I keep as is - except: # noqa: E722 # pylint: disable=bare-except - return False - - -@dataclass -class SigningCredentials: - secret_key: str - passphrase: str - - -@dataclass -class InputOutputParameters: - input_files_dir: str - output_dir: str - output_validation: bool - - @staticmethod - @validate_parameters - # disabled since this is related to parameter_validations library methods - # pylint: disable=no-value-for-parameter - def build( - input_files_dir: non_empty(non_blank(str)), - output_dir: non_empty(non_blank(str)), - output_validation: bool = False, - ): - return InputOutputParameters( - input_files_dir=input_files_dir, - output_dir=output_dir, - output_validation=output_validation, - ) - - -@validate_parameters -# disabled since this is related to parameter_validations library methods -# pylint: disable=no-value-for-parameter -def get_signing_credentials( - packaging_secret_key: str, packaging_passphrase: non_empty(non_blank(str)) -) -> SigningCredentials: - if packaging_secret_key: - secret_key = packaging_secret_key - else: - fingerprints = get_gpg_fingerprints_by_name(GPG_KEY_NAME) - if len(fingerprints) == 0: - raise ValueError(f"Key for {GPG_KEY_NAME} does not exist") - - gpg = gnupg.GPG() - - private_key = gpg.export_keys( - fingerprints[0], secret=True, passphrase=packaging_passphrase - ) - secret_key = transform_key_into_base64_str(private_key) - - passphrase = packaging_passphrase - return SigningCredentials(secret_key=secret_key, passphrase=passphrase) - - -def write_postgres_versions_into_file( - input_files_dir: str, package_version: str, os_name: str = "", platform: str = "" -): - # In ADO pipelines function without os_name and platform is used. 
If these parameters are unset - if not os_name: - print("os name is empty") - release_versions = get_supported_postgres_release_versions( - f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}", package_version - ) - nightly_versions = get_supported_postgres_nightly_versions( - f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}" - ) - else: - print(f"os: {os_name} platform: {platform}") - release_versions, nightly_versions = get_postgres_versions( - platform=platform, input_files_dir=input_files_dir - ) - release_version_str = ",".join(release_versions) - nightly_version_str = ",".join(nightly_versions) - print( - f"Release versions: {release_version_str}, Nightly versions: {nightly_version_str}" - ) - with open( - f"{input_files_dir}/{POSTGRES_VERSION_FILE}", - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as f: - f.write(f"release_versions={release_version_str}\n") - f.write(f"nightly_versions={nightly_version_str}\n") - - -def sign_packages( - sub_folder: str, - signing_credentials: SigningCredentials, - input_output_parameters: InputOutputParameters, -): - output_path = f"{input_output_parameters.output_dir}/{sub_folder}" - deb_files = glob.glob(f"{output_path}/*.deb", recursive=True) - rpm_files = glob.glob(f"{output_path}/*.rpm", recursive=True) - os.environ["PACKAGING_PASSPHRASE"] = signing_credentials.passphrase - os.environ["PACKAGING_SECRET_KEY"] = signing_credentials.secret_key - - if len(rpm_files) > 0: - print("Started RPM Signing...") - result = run_with_output( - f"docker run --rm -v {output_path}:/packages/{sub_folder} -e PACKAGING_SECRET_KEY -e " - f"PACKAGING_PASSPHRASE citusdata/packaging:rpmsigner", - text=True, - ) - output = result.stdout - print(f"Result:{output}") - - if result.returncode != 0: - raise ValueError(f"Error while signing rpm files.Err:{result.stderr}") - if input_output_parameters.output_validation: - validate_output( - output, - f"{input_output_parameters.input_files_dir}/packaging_ignore.yml", - PackageType.rpm, - ) - - print("RPM signing finished successfully.") - - if len(deb_files) > 0: - print("Started DEB Signing...") - - # output is required to understand the error if any so check parameter is not used - # pylint: disable=subprocess-run-check - result = subprocess.run( - [ - "docker", - "run", - "--rm", - "-v", - f"{output_path}:/packages/{sub_folder}", - "-e", - "PACKAGING_SECRET_KEY", - "-e", - "PACKAGING_PASSPHRASE", - "citusdata/packaging:debsigner", - ], - text=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - input=signing_credentials.passphrase, - ) - output = result.stdout - print(f"Result:{output}") - - if result.returncode != 0: - raise ValueError(f"Error while signing deb files.Err:{result.stdout}") - - if input_output_parameters.output_validation: - validate_output( - result.stdout, - f"{input_output_parameters.input_files_dir}/packaging_ignore.yml", - PackageType.deb, - ) - - print("DEB signing finished successfully.") - - -def get_postgres_versions( - platform: str, input_files_dir: str -) -> Tuple[List[str], List[str]]: - package_version = get_package_version_without_release_stage_from_pkgvars( - input_files_dir - ) - release_versions = get_supported_postgres_release_versions( - f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}", package_version - ) - nightly_versions = get_supported_postgres_nightly_versions( - f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}" - ) - - exclude_dict_release, exclude_dict_nightly = get_exclude_dict( - input_files_dir=input_files_dir - ) - - 
platform_key_release = "all" if "all" in exclude_dict_release else platform - platform_key_nightly = "all" if "all" in exclude_dict_nightly else platform - - if exclude_dict_release and platform_key_release in exclude_dict_release: - release_versions = [ - v - for v in release_versions - if v not in exclude_dict_release[platform_key_release] - ] - - if exclude_dict_nightly and platform_key_nightly in exclude_dict_nightly: - nightly_versions = [ - v - for v in release_versions - if v not in exclude_dict_nightly[platform_key_nightly] - ] - - return release_versions, nightly_versions - - -@validate_parameters -# disabled since this is related to parameter_validations library methods -# pylint: disable=no-value-for-parameter -def build_package( - github_token: non_empty(non_blank(str)), - build_type: BuildType, - docker_platform: str, - postgres_version: str, - input_output_parameters: InputOutputParameters, - is_test: bool = False, -): - docker_image_name = "packaging" if not is_test else "packaging-test" - postgres_extension = "all" if postgres_version == "all" else f"pg{postgres_version}" - os.environ["GITHUB_TOKEN"] = github_token - os.environ["CONTAINER_BUILD_RUN_ENABLED"] = "true" - if not os.path.exists(input_output_parameters.output_dir): - os.makedirs(input_output_parameters.output_dir) - - docker_command = ( - f"docker run --rm -v {input_output_parameters.output_dir}:/packages -v " - f"{input_output_parameters.input_files_dir}:/buildfiles:ro " - f"-e GITHUB_TOKEN -e PACKAGE_ENCRYPTION_KEY -e UNENCRYPTED_PACKAGE -e CONTAINER_BUILD_RUN_ENABLED " - f"-e MSRUSTUP_PAT -e CRATES_IO_MIRROR_FEED_TOKEN -e INSTALL_RUST -e CI " - f"citus/{docker_image_name}:{docker_platform}-{postgres_extension} {build_type.name}" - ) - - print(f"Executing docker command: {docker_command}") - output = run_with_output(docker_command, text=True) - - if output.stdout: - print("Output:" + output.stdout) - if output.returncode != 0: - raise ValueError(output.stderr) - - if input_output_parameters.output_validation: - validate_output( - output.stdout, - f"{input_output_parameters.input_files_dir}/packaging_ignore.yml", - get_package_type_by_docker_image_name(docker_platform), - ) - - -def get_release_package_folder_name(os_name: str, os_version: str) -> str: - return f"{os_name}-{os_version}" - - -# Gets the docker image name for the given platform. -# Normally, the docker image name has one to one matching with os name. -# However, there are some exceptions for this rule. For example, docker image name for both el-9 and ol-9 is -# almalinux-9. This is because, both el/9 and ol/9 platforms can use packages built on almalinux-9 docker image. 
-def get_docker_image_name(platform: str): - if platform in docker_image_names: - return docker_image_names[platform] - os_name, os_version = decode_os_and_release(platform) - return ( - f"{docker_image_names[os_name]}-{os_version}" - if os_version - else f"{docker_image_names[os_name]}" - ) - - -@validate_parameters -# disabled since this is related to parameter_validations library methods -# pylint: disable=no-value-for-parameter -# pylint: disable= too-many-locals -def build_packages( - github_token: non_empty(non_blank(str)), - platform: non_empty(non_blank(str)), - build_type: BuildType, - signing_credentials: SigningCredentials, - input_output_parameters: InputOutputParameters, - is_test: bool = False, -) -> None: - os_name, os_version = decode_os_and_release(platform) - release_versions, nightly_versions = get_postgres_versions( - platform, input_output_parameters.input_files_dir - ) - - signing_credentials = get_signing_credentials( - signing_credentials.secret_key, signing_credentials.passphrase - ) - - if platform != "pgxn": - package_version = get_package_version_without_release_stage_from_pkgvars( - input_output_parameters.input_files_dir - ) - write_postgres_versions_into_file( - input_output_parameters.input_files_dir, package_version, os_name, platform - ) - - if not signing_credentials.passphrase: - raise ValueError("PACKAGING_PASSPHRASE should not be null or empty") - postgress_versions_to_process = ( - release_versions if build_type == BuildType.release else nightly_versions - ) - - if ( - platform_postgres_version_source[os_name] - == PostgresVersionDockerImageType.single - ): - postgres_docker_extension_iterator = ["all"] - else: - postgres_docker_extension_iterator = postgress_versions_to_process - - docker_image_name = get_docker_image_name(platform) - output_sub_folder = get_release_package_folder_name(os_name, os_version) - input_output_parameters.output_dir = ( - f"{input_output_parameters.output_dir}/{output_sub_folder}" - ) - for postgres_docker_extension in postgres_docker_extension_iterator: - print( - f"Package build for {os_name}-{os_version} for postgres {postgres_docker_extension} started... " - ) - build_package( - github_token, - build_type, - docker_image_name, - postgres_docker_extension, - input_output_parameters, - is_test, - ) - print( - f"Package build for {os_name}-{os_version} for postgres {postgres_docker_extension} finished " - ) - - sign_packages(output_sub_folder, signing_credentials, input_output_parameters) - - -def get_build_platform(packaging_platform: str, packaging_docker_platform: str) -> str: - return ( - package_docker_platform_dict[packaging_docker_platform] - if packaging_docker_platform - else packaging_platform - ) - - -def get_package_version_from_pkgvars(input_files_dir: str): - pkgvars_config = dotenv_values(f"{input_files_dir}/pkgvars") - package_version_with_suffix = pkgvars_config["pkglatest"] - version_parts = package_version_with_suffix.split(".") - # hll is working with minor release format e.g. 2.16.citus-1 - pkg_name = pkgvars_config["pkgname"] - - if len(version_parts) < 3: - raise ValueError( - "Version should at least contains three parts seperated with '.'. 
e.g 10.0.2-1" - ) - - third_part_splitted = version_parts[2].split("-") - - if pkg_name in ("hll", "azure_gdpr"): - package_version = f"{version_parts[0]}.{version_parts[1]}" - else: - package_version = ( - f"{version_parts[0]}.{version_parts[1]}.{third_part_splitted[0]}" - ) - return package_version - - -def get_package_version_without_release_stage_from_pkgvars(input_files_dir: str): - version = get_package_version_from_pkgvars(input_files_dir) - return tear_release_stage_from_package_version(version) - - -def get_exclude_dict(input_files_dir: str) -> Tuple[Dict, Dict]: - exclude_dict_release = {} - exclude_dict_nightly = {} - exclude_file_path = f"{input_files_dir}/{POSTGRES_EXCLUDE_FILE_NAME}" - if os.path.exists(exclude_file_path): - with open( - exclude_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - yaml_content = yaml.load(reader, yaml.BaseLoader) - for os_release, pg_versions in yaml_content["exclude"]["release"].items(): - print(f"{os_release} {pg_versions}") - exclude_dict_release[os_release] = pg_versions - for os_release, pg_versions in yaml_content["exclude"]["nightly"].items(): - print(f"{os_release} {pg_versions}") - exclude_dict_nightly[os_release] = pg_versions - return exclude_dict_release, exclude_dict_nightly - - -def tear_release_stage_from_package_version(package_version: str) -> str: - return package_version.split("_")[0] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--gh_token", required=True) - parser.add_argument("--platform", required=False, choices=platform_names()) - parser.add_argument( - "--packaging_docker_platform", - required=False, - choices=package_docker_platform_dict.keys(), - ) - parser.add_argument("--build_type", choices=[b.name for b in BuildType]) - parser.add_argument("--secret_key", required=True) - parser.add_argument("--passphrase", required=True) - parser.add_argument("--output_dir", required=True) - parser.add_argument("--input_files_dir", required=True) - parser.add_argument("--output_validation", action="store_true") - parser.add_argument("--is_test", action="store_true") - - args = parser.parse_args() - - if args.platform and args.packaging_docker_platform: - raise ValueError("Either platform or packaging_docker_platform should be set.") - build_platform = get_build_platform(args.platform, args.packaging_docker_platform) - - io_parameters = InputOutputParameters.build( - args.input_files_dir, args.output_dir, args.output_validation - ) - sign_credentials = SigningCredentials(args.secret_key, args.passphrase) - build_packages( - args.gh_token, - build_platform, - BuildType[args.build_type], - sign_credentials, - io_parameters, - args.is_test, - ) +import argparse +import glob +import os +import subprocess +from enum import Enum +from typing import Dict +from typing import List +from typing import Tuple + +import docker +import gnupg +import yaml +from attr import dataclass +from dotenv import dotenv_values +from parameters_validation import non_blank, non_empty, validate_parameters + +from .common_tool_methods import ( + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, + PackageType, + get_gpg_fingerprints_by_name, + get_supported_postgres_nightly_versions, + get_supported_postgres_release_versions, + platform_names, + run_with_output, + supported_platforms, + transform_key_into_base64_str, +) +from .packaging_warning_handler import validate_output + +GPG_KEY_NAME = "packaging@citusdata.com" + 
+POSTGRES_VERSION_FILE = "supported-postgres" +POSTGRES_MATRIX_FILE_NAME = "postgres-matrix.yml" +POSTGRES_EXCLUDE_FILE_NAME = "pg_exclude.yml" + +docker_image_names = { + "almalinux": "almalinux", + "rockylinux": "almalinux", + "debian": "debian", + "el/9": "almalinux-9", + "ol/9": "almalinux-9", + "el/8": "almalinux-8", + "el": "centos", + "ol": "oraclelinux", + "ubuntu": "ubuntu", + "pgxn": "pgxn", +} + +package_docker_platform_dict = { + "almalinux,9": "almalinux/9", + "almalinux,8": "almalinux/8", + "centos,8": "el/8", + "centos,7": "el/7", + "debian,bookworm": "debian/bookworm", + "debian,bullseye": "debian/bullseye", + "debian,buster": "debian/buster", + "debian,stretch": "debian/stretch", + "oraclelinux,8": "ol/8", + "oraclelinux,7": "ol/7", + "oraclelinux,6": "ol/6", + "ubuntu,focal": "ubuntu/focal", + "ubuntu,bionic": "ubuntu/bionic", + "ubuntu,jammy": "ubuntu/jammy", + "ubuntu,kinetic": "ubuntu/kinetic", + "pgxn": "pgxn", +} + + +def get_package_type_by_docker_image_name(docker_image_name: str) -> PackageType: + return ( + PackageType.deb + if docker_image_name.startswith(("ubuntu", "debian")) + else PackageType.rpm + ) + + +class BuildType(Enum): + release = 1 + nightly = 2 + + +class PostgresVersionDockerImageType(Enum): + multiple = 1 + single = 2 + + +platform_postgres_version_source = { + "el": PostgresVersionDockerImageType.multiple, + "ol": PostgresVersionDockerImageType.multiple, + "almalinux": PostgresVersionDockerImageType.multiple, + "debian": PostgresVersionDockerImageType.single, + "ubuntu": PostgresVersionDockerImageType.single, + "pgxn": PostgresVersionDockerImageType.single, +} + +PKGVARS_FILE = "pkgvars" +SINGLE_DOCKER_POSTGRES_PREFIX = "all" +PACKAGES_DIR_NAME = "packages" + + +def decode_os_and_release(platform_name: str) -> Tuple[str, str]: + parts = platform_name.split("/") + + if len(parts) == 0 or len(parts) > 2 or (len(parts) == 1 and parts[0] != "pgxn"): + raise ValueError( + "Platforms should have two parts divided by '/' or should be 'pgxn' " + ) + if len(parts) == 1 and parts[0] == "pgxn": + os_name = "pgxn" + os_release = "" + else: + os_name = parts[0] + os_release = parts[1] + if os_name not in supported_platforms: + raise ValueError( + f"{os_name} is not among supported operating systems. Supported operating systems are as below:\n " + f"{','.join(supported_platforms.keys())}" + ) + if os_release not in supported_platforms[os_name]: + raise ValueError( + f"{os_release} is not among supported releases for {os_name}." 
+ f"Supported releases are as below:\n {','.join(supported_platforms[os_name])}" + ) + return os_name, os_release + + +def is_docker_running() -> bool: + try: + docker_client = docker.from_env() + docker_client.ping() + return True + ## Exception type is not defined in API so I keep as is + except: # noqa: E722 # pylint: disable=bare-except + return False + + +@dataclass +class SigningCredentials: + secret_key: str + passphrase: str + + +@dataclass +class InputOutputParameters: + input_files_dir: str + output_dir: str + output_validation: bool + + @staticmethod + @validate_parameters + # disabled since this is related to parameter_validations library methods + # pylint: disable=no-value-for-parameter + def build( + input_files_dir: non_empty(non_blank(str)), + output_dir: non_empty(non_blank(str)), + output_validation: bool = False, + ): + return InputOutputParameters( + input_files_dir=input_files_dir, + output_dir=output_dir, + output_validation=output_validation, + ) + + +@validate_parameters +# disabled since this is related to parameter_validations library methods +# pylint: disable=no-value-for-parameter +def get_signing_credentials( + packaging_secret_key: str, packaging_passphrase: non_empty(non_blank(str)) +) -> SigningCredentials: + if packaging_secret_key: + secret_key = packaging_secret_key + else: + fingerprints = get_gpg_fingerprints_by_name(GPG_KEY_NAME) + if len(fingerprints) == 0: + raise ValueError(f"Key for {GPG_KEY_NAME} does not exist") + + gpg = gnupg.GPG() + + private_key = gpg.export_keys( + fingerprints[0], secret=True, passphrase=packaging_passphrase + ) + secret_key = transform_key_into_base64_str(private_key) + + passphrase = packaging_passphrase + return SigningCredentials(secret_key=secret_key, passphrase=passphrase) + + +def write_postgres_versions_into_file( + input_files_dir: str, package_version: str, os_name: str = "", platform: str = "" +): + # In ADO pipelines function without os_name and platform is used. 
If these parameters are unset + if not os_name: + print("os name is empty") + release_versions = get_supported_postgres_release_versions( + f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}", package_version + ) + nightly_versions = get_supported_postgres_nightly_versions( + f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}" + ) + else: + print(f"os: {os_name} platform: {platform}") + release_versions, nightly_versions = get_postgres_versions( + platform=platform, input_files_dir=input_files_dir + ) + release_version_str = ",".join(release_versions) + nightly_version_str = ",".join(nightly_versions) + print( + f"Release versions: {release_version_str}, Nightly versions: {nightly_version_str}" + ) + with open( + f"{input_files_dir}/{POSTGRES_VERSION_FILE}", + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as f: + f.write(f"release_versions={release_version_str}\n") + f.write(f"nightly_versions={nightly_version_str}\n") + + +def sign_packages( + sub_folder: str, + signing_credentials: SigningCredentials, + input_output_parameters: InputOutputParameters, +): + output_path = f"{input_output_parameters.output_dir}/{sub_folder}" + deb_files = glob.glob(f"{output_path}/*.deb", recursive=True) + rpm_files = glob.glob(f"{output_path}/*.rpm", recursive=True) + os.environ["PACKAGING_PASSPHRASE"] = signing_credentials.passphrase + os.environ["PACKAGING_SECRET_KEY"] = signing_credentials.secret_key + + if len(rpm_files) > 0: + print("Started RPM Signing...") + result = run_with_output( + f"docker run --rm -v {output_path}:/packages/{sub_folder} -e PACKAGING_SECRET_KEY -e " + f"PACKAGING_PASSPHRASE citusdata/packaging:rpmsigner", + text=True, + ) + output = result.stdout + print(f"Result:{output}") + + if result.returncode != 0: + raise ValueError(f"Error while signing rpm files.Err:{result.stderr}") + if input_output_parameters.output_validation: + validate_output( + output, + f"{input_output_parameters.input_files_dir}/packaging_ignore.yml", + PackageType.rpm, + ) + + print("RPM signing finished successfully.") + + if len(deb_files) > 0: + print("Started DEB Signing...") + + # output is required to understand the error if any so check parameter is not used + # pylint: disable=subprocess-run-check + result = subprocess.run( + [ + "docker", + "run", + "--rm", + "-v", + f"{output_path}:/packages/{sub_folder}", + "-e", + "PACKAGING_SECRET_KEY", + "-e", + "PACKAGING_PASSPHRASE", + "citusdata/packaging:debsigner", + ], + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + input=signing_credentials.passphrase, + ) + output = result.stdout + print(f"Result:{output}") + + if result.returncode != 0: + raise ValueError(f"Error while signing deb files.Err:{result.stdout}") + + if input_output_parameters.output_validation: + validate_output( + result.stdout, + f"{input_output_parameters.input_files_dir}/packaging_ignore.yml", + PackageType.deb, + ) + + print("DEB signing finished successfully.") + + +def get_postgres_versions( + platform: str, input_files_dir: str +) -> Tuple[List[str], List[str]]: + package_version = get_package_version_without_release_stage_from_pkgvars( + input_files_dir + ) + release_versions = get_supported_postgres_release_versions( + f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}", package_version + ) + nightly_versions = get_supported_postgres_nightly_versions( + f"{input_files_dir}/{POSTGRES_MATRIX_FILE_NAME}" + ) + + exclude_dict_release, exclude_dict_nightly = get_exclude_dict( + input_files_dir=input_files_dir + ) + + 
platform_key_release = "all" if "all" in exclude_dict_release else platform
+    platform_key_nightly = "all" if "all" in exclude_dict_nightly else platform
+
+    if exclude_dict_release and platform_key_release in exclude_dict_release:
+        release_versions = [
+            v
+            for v in release_versions
+            if v not in exclude_dict_release[platform_key_release]
+        ]
+
+    if exclude_dict_nightly and platform_key_nightly in exclude_dict_nightly:
+        nightly_versions = [
+            v
+            for v in nightly_versions
+            if v not in exclude_dict_nightly[platform_key_nightly]
+        ]
+
+    return release_versions, nightly_versions
+
+
+@validate_parameters
+# disabled since this is related to parameter_validations library methods
+# pylint: disable=no-value-for-parameter
+def build_package(
+    github_token: non_empty(non_blank(str)),
+    build_type: BuildType,
+    docker_platform: str,
+    postgres_version: str,
+    input_output_parameters: InputOutputParameters,
+    is_test: bool = False,
+):
+    docker_image_name = "packaging" if not is_test else "packaging-test"
+    postgres_extension = "all" if postgres_version == "all" else f"pg{postgres_version}"
+    os.environ["GITHUB_TOKEN"] = github_token
+    os.environ["CONTAINER_BUILD_RUN_ENABLED"] = "true"
+    if not os.path.exists(input_output_parameters.output_dir):
+        os.makedirs(input_output_parameters.output_dir)
+
+    docker_command = (
+        f"docker run --rm -v {input_output_parameters.output_dir}:/packages -v "
+        f"{input_output_parameters.input_files_dir}:/buildfiles:ro "
+        f"-e GITHUB_TOKEN -e PACKAGE_ENCRYPTION_KEY -e UNENCRYPTED_PACKAGE -e CONTAINER_BUILD_RUN_ENABLED "
+        f"-e MSRUSTUP_PAT -e CRATES_IO_MIRROR_FEED_TOKEN -e INSTALL_RUST -e CI "
+        f"citus/{docker_image_name}:{docker_platform}-{postgres_extension} {build_type.name}"
+    )
+
+    print(f"Executing docker command: {docker_command}")
+    output = run_with_output(docker_command, text=True)
+
+    if output.stdout:
+        print("Output:" + output.stdout)
+    if output.returncode != 0:
+        raise ValueError(output.stderr)
+
+    if input_output_parameters.output_validation:
+        validate_output(
+            output.stdout,
+            f"{input_output_parameters.input_files_dir}/packaging_ignore.yml",
+            get_package_type_by_docker_image_name(docker_platform),
+        )
+
+
+def get_release_package_folder_name(os_name: str, os_version: str) -> str:
+    return f"{os_name}-{os_version}"
+
+
+# Gets the docker image name for the given platform.
+# Normally, the docker image name has a one-to-one matching with the os name.
+# However, there are some exceptions to this rule. For example, the docker image name for both el-9 and ol-9 is
+# almalinux-9. This is because both el/9 and ol/9 platforms can use packages built on the almalinux-9 docker image. 
+def get_docker_image_name(platform: str):
+    if platform in docker_image_names:
+        return docker_image_names[platform]
+    os_name, os_version = decode_os_and_release(platform)
+    return (
+        f"{docker_image_names[os_name]}-{os_version}"
+        if os_version
+        else f"{docker_image_names[os_name]}"
+    )
+
+
+@validate_parameters
+# disabled since this is related to parameter_validations library methods
+# pylint: disable=no-value-for-parameter
+# pylint: disable= too-many-locals
+def build_packages(
+    github_token: non_empty(non_blank(str)),
+    platform: non_empty(non_blank(str)),
+    build_type: BuildType,
+    signing_credentials: SigningCredentials,
+    input_output_parameters: InputOutputParameters,
+    is_test: bool = False,
+) -> None:
+    os_name, os_version = decode_os_and_release(platform)
+    release_versions, nightly_versions = get_postgres_versions(
+        platform, input_output_parameters.input_files_dir
+    )
+
+    signing_credentials = get_signing_credentials(
+        signing_credentials.secret_key, signing_credentials.passphrase
+    )
+
+    if platform != "pgxn":
+        package_version = get_package_version_without_release_stage_from_pkgvars(
+            input_output_parameters.input_files_dir
+        )
+        write_postgres_versions_into_file(
+            input_output_parameters.input_files_dir, package_version, os_name, platform
+        )
+
+    if not signing_credentials.passphrase:
+        raise ValueError("PACKAGING_PASSPHRASE should not be null or empty")
+    postgres_versions_to_process = (
+        release_versions if build_type == BuildType.release else nightly_versions
+    )
+
+    if (
+        platform_postgres_version_source[os_name]
+        == PostgresVersionDockerImageType.single
+    ):
+        postgres_docker_extension_iterator = ["all"]
+    else:
+        postgres_docker_extension_iterator = postgres_versions_to_process
+
+    docker_image_name = get_docker_image_name(platform)
+    output_sub_folder = get_release_package_folder_name(os_name, os_version)
+    input_output_parameters.output_dir = (
+        f"{input_output_parameters.output_dir}/{output_sub_folder}"
+    )
+    for postgres_docker_extension in postgres_docker_extension_iterator:
+        print(
+            f"Package build for {os_name}-{os_version} for postgres {postgres_docker_extension} started... "
+        )
+        build_package(
+            github_token,
+            build_type,
+            docker_image_name,
+            postgres_docker_extension,
+            input_output_parameters,
+            is_test,
+        )
+        print(
+            f"Package build for {os_name}-{os_version} for postgres {postgres_docker_extension} finished "
+        )
+
+    sign_packages(output_sub_folder, signing_credentials, input_output_parameters)
+
+
+def get_build_platform(packaging_platform: str, packaging_docker_platform: str) -> str:
+    return (
+        package_docker_platform_dict[packaging_docker_platform]
+        if packaging_docker_platform
+        else packaging_platform
+    )
+
+
+def get_package_version_from_pkgvars(input_files_dir: str):
+    pkgvars_config = dotenv_values(f"{input_files_dir}/pkgvars")
+    package_version_with_suffix = pkgvars_config["pkglatest"]
+    version_parts = package_version_with_suffix.split(".")
+    # hll uses the minor release format, e.g. 2.16.citus-1
+    pkg_name = pkgvars_config["pkgname"]
+
+    if len(version_parts) < 3:
+        raise ValueError(
+            "Version should contain at least three parts separated by '.', 
e.g 10.0.2-1" + ) + + third_part_splitted = version_parts[2].split("-") + + if pkg_name in ("hll", "azure_gdpr"): + package_version = f"{version_parts[0]}.{version_parts[1]}" + else: + package_version = ( + f"{version_parts[0]}.{version_parts[1]}.{third_part_splitted[0]}" + ) + return package_version + + +def get_package_version_without_release_stage_from_pkgvars(input_files_dir: str): + version = get_package_version_from_pkgvars(input_files_dir) + return tear_release_stage_from_package_version(version) + + +def get_exclude_dict(input_files_dir: str) -> Tuple[Dict, Dict]: + exclude_dict_release = {} + exclude_dict_nightly = {} + exclude_file_path = f"{input_files_dir}/{POSTGRES_EXCLUDE_FILE_NAME}" + if os.path.exists(exclude_file_path): + with open( + exclude_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + yaml_content = yaml.load(reader, yaml.BaseLoader) + for os_release, pg_versions in yaml_content["exclude"]["release"].items(): + print(f"{os_release} {pg_versions}") + exclude_dict_release[os_release] = pg_versions + for os_release, pg_versions in yaml_content["exclude"]["nightly"].items(): + print(f"{os_release} {pg_versions}") + exclude_dict_nightly[os_release] = pg_versions + return exclude_dict_release, exclude_dict_nightly + + +def tear_release_stage_from_package_version(package_version: str) -> str: + return package_version.split("_")[0] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--gh_token", required=True) + parser.add_argument("--platform", required=False, choices=platform_names()) + parser.add_argument( + "--packaging_docker_platform", + required=False, + choices=package_docker_platform_dict.keys(), + ) + parser.add_argument("--build_type", choices=[b.name for b in BuildType]) + parser.add_argument("--secret_key", required=True) + parser.add_argument("--passphrase", required=True) + parser.add_argument("--output_dir", required=True) + parser.add_argument("--input_files_dir", required=True) + parser.add_argument("--output_validation", action="store_true") + parser.add_argument("--is_test", action="store_true") + + args = parser.parse_args() + + if args.platform and args.packaging_docker_platform: + raise ValueError("Either platform or packaging_docker_platform should be set.") + build_platform = get_build_platform(args.platform, args.packaging_docker_platform) + + io_parameters = InputOutputParameters.build( + args.input_files_dir, args.output_dir, args.output_validation + ) + sign_credentials = SigningCredentials(args.secret_key, args.passphrase) + build_packages( + args.gh_token, + build_platform, + BuildType[args.build_type], + sign_credentials, + io_parameters, + args.is_test, + ) diff --git a/packaging_automation/common_tool_methods.py b/packaging_automation/common_tool_methods.py index a63d3017..e24f8f42 100644 --- a/packaging_automation/common_tool_methods.py +++ b/packaging_automation/common_tool_methods.py @@ -1,797 +1,797 @@ -import base64 -import os -import re -import shlex -import subprocess -from datetime import datetime -from enum import Enum -from typing import Dict, List, Tuple - -import git -import gnupg -import pathlib2 -import requests -import yaml -from git import GitCommandError, Repo -from github import Commit, Github, PullRequest, Repository -from jinja2 import Environment, FileSystemLoader -from parameters_validation import validate_parameters - -from .common_validations import is_tag, is_version -from .dbconfig import RequestLog, 
RequestType - -BASE_GIT_PATH = pathlib2.Path(__file__).parents[1] -PATCH_VERSION_MATCH_FROM_MINOR_SUFFIX = r"\.\d{1,3}" -POSTGRES_MATRIX_FLIE_NAME = "postgres-matrix.yml" -PATCH_VERSION_MATCH_FROM_MINOR_SUFFIX = r"\.\d{1,3}" - -# http://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html -# https://bleepcoder.com/pylint/698183789/pep-597-require-encoding-kwarg-in-open-call-and-other-calls -# Parameterized to fix pylint unspecified-encoding error -DEFAULT_ENCODING_FOR_FILE_HANDLING = "utf8" -DEFAULT_UNICODE_ERROR_HANDLER = "surrogateescape" - -# When using GitPython library Repo objects should be closed to be able to delete cloned sources -# referenced by Repo objects.References are stored in below array to be able to close -# all resources after the code execution. -referenced_repos: List[Repo] = [] - -supported_platforms = { - "debian": ["bookworm", "bullseye", "buster", "stretch", "jessie", "wheezy"], - "almalinux": ["8", "9"], - "el": ["9", "8", "7", "6"], - "ol": ["9", "8", "7"], - "ubuntu": ["focal", "bionic", "trusty", "jammy", "kinetic"], -} - - -def platform_names() -> List[str]: - platforms = [] - for platform_os, platform_releases in supported_platforms.items(): - for platform_release in platform_releases: - platforms.append(f"{platform_os}/{platform_release}") - platforms.append("pgxn") - return platforms - - -def get_new_repo(working_dir: str) -> Repo: - repo = Repo(working_dir) - referenced_repos.append(repo) - return repo - - -def release_all_repos(): - for repo in referenced_repos: - repo.close() - - -class PackageType(Enum): - deb = 1 - rpm = 2 - - -class GpgKeyType(Enum): - private = 1 - public = 2 - - -BASE_PATH = pathlib2.Path(__file__).parents[1] - - -def get_spec_file_name(project_name: str) -> str: - return f"{project_name}.spec" - - -def get_minor_project_version(project_version: str) -> str: - project_version_details = get_version_details(project_version) - return f'{project_version_details["major"]}.{project_version_details["minor"]}' - - -def project_version_contains_release_stage(project_version: str) -> bool: - return "_" in project_version - - -def get_minor_project_version_for_docker(project_version: str) -> str: - project_version_details = get_version_details(project_version) - minor_version = ( - f'{project_version_details["major"]}.{project_version_details["minor"]}' - ) - if project_version_contains_release_stage(project_version): - return f'{project_version_details["stage"]}-{minor_version}' - return minor_version - - -def append_fancy_suffix_to_version(version: str, fancy_release_number: int) -> str: - fancy_suffix = f"-{fancy_release_number}" - return f"{version}{fancy_suffix}" - - -def append_project_name_to_version(project_name: str, version: str) -> str: - return f"{version}.{project_name}" - - -def get_project_version_from_tag_name(tag_name: is_tag(str)) -> str: - return tag_name[1:] - - -def get_template_environment(template_dir: str) -> Environment: - file_loader = FileSystemLoader(template_dir) - env = Environment(loader=file_loader) - return env - - -def find_nth_occurrence_position(subject_string: str, search_string: str, n) -> int: - start = subject_string.find(search_string) - - while start >= 0 and n > 1: - start = subject_string.find(search_string, start + 1) - n -= 1 - return start - - -def find_nth_matching_line_and_line_number( - subject_string: str, regex_pattern: str, n: int -) -> Tuple[int, str]: - """Takes a subject string, regex param and the search index as parameter and returns line number of found 
match. - If not found returns -1""" - lines = subject_string.splitlines() - counter = 0 - for line_number, line in enumerate(lines): - if re.match(regex_pattern, line): - counter = counter + 1 - if counter == n: - return line_number, lines[line_number] - return -1, "" - - -def remove_text_with_parenthesis(param: str) -> str: - """Removes texts within parenthesis i.e. outside parenthesis(inside parenthesis)-> outside parenthesis""" - return re.sub(r"[(\[].*?[)\]]", "", param) - - -def run(command, *args, **kwargs): - result = subprocess.run(shlex.split(command), *args, check=True, **kwargs) - return result - - -def run_with_output(command, *args, **kwargs): - # this method's main objective is to return output. Therefore it is caller's responsibility to handle - # success status - # pylint: disable=subprocess-run-check - result = subprocess.run(shlex.split(command), *args, capture_output=True, **kwargs) - return result - - -def cherry_pick_prs(prs: List[PullRequest.PullRequest]): - for pr in prs: - commits = pr.get_commits() - for single_commit in commits: - if not is_merge_commit(single_commit): - cp_result = run(f"git cherry-pick -x {single_commit.commit.sha}") - print( - f"Cherry pick result for PR no {pr.number} and commit sha {single_commit.commit.sha}: {cp_result}" - ) - - -def get_minor_version(version: str) -> str: - project_version_details = get_version_details(version) - return f'{project_version_details["major"]}.{project_version_details["minor"]}' - - -@validate_parameters -def get_patch_version_regex(version: is_version(str)): - return rf"^{re.escape(get_minor_version(version))}{PATCH_VERSION_MATCH_FROM_MINOR_SUFFIX}$" - - -def is_merge_commit(commit: Commit): - return len(commit.parents) <= 1 - - -@validate_parameters -def get_version_details(version: is_version(str)) -> Dict[str, str]: - version_parts = version.split(".") - release_stage = "stable" - assert len(version_parts) == 3 - if project_version_contains_release_stage(version): - stage_parts = version_parts[2].split("_") - release_stage = stage_parts[1] - return { - "major": version_parts[0], - "minor": version_parts[1], - "patch": version_parts[2], - "stage": release_stage, - } - - -@validate_parameters -def get_upcoming_patch_version(version: is_version(str)) -> str: - project_version_details = get_version_details(version) - return f'{get_upcoming_minor_version(version)}.{int(project_version_details["patch"]) + 1}' - - -@validate_parameters -def get_upcoming_minor_version(version: is_version(str)) -> str: - project_version_details = get_version_details(version) - return f'{project_version_details["major"]}.{int(project_version_details["minor"]) + 1}' - - -def get_last_commit_message(path: str) -> str: - repo = get_new_repo(path) - commit = repo.head.commit - return commit.message - - -@validate_parameters -def is_major_release(version: is_version(str)) -> bool: - version_info = get_version_details(version) - return version_info["patch"] == "0" - - -def str_array_to_str(str_array: List[str]) -> str: - return f"{os.linesep.join(str_array)}{os.linesep}" - - -def get_prs_for_patch_release( - repo: Repository.Repository, - earliest_date: datetime, - base_branch: str, - last_date: datetime = None, -): - pull_requests = repo.get_pulls( - state="closed", base=base_branch, sort="created", direction="desc" - ) - - # filter pull requests according to given time interval - filtered_pull_requests = [] - for pull_request in pull_requests: - # FIXME: We hit to API rate limit when using `.merged`, so we use `.merged_at` here - if not 
pull_request.merged_at: - continue - if pull_request.merged_at < earliest_date: - continue - if last_date and pull_request.merged_at > last_date: - continue - - filtered_pull_requests.append(pull_request) - - # finally, sort the pr's by their merge date - sorted_pull_requests = sorted(filtered_pull_requests, key=lambda p: p.merged_at) - return sorted_pull_requests - - -def filter_prs_by_label(prs: List[PullRequest.PullRequest], label_name: str): - filtered_prs = [] - for pr in prs: - if any(label.name == label_name for label in pr.labels): - filtered_prs.append(pr) - return filtered_prs - - -def file_includes_line( - base_path: str, relative_file_path: str, line_content: str -) -> bool: - with open( - f"{base_path}/{relative_file_path}", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - for line in lines: - if line == line_content: - return True - return False - - -def count_line_in_file( - base_path: str, relative_file_path: str, search_line: str -) -> int: - with open( - f"{base_path}/{relative_file_path}", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - return len(list(filter(lambda line: line == search_line, lines))) - - -def replace_line_in_file(file: str, match_regex: str, replace_str: str) -> bool: - with open( - file, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - file_content = reader.read() - lines = file_content.splitlines() - has_match = False - for line_number, line in enumerate(lines): - if re.match(match_regex, line.strip()): - has_match = True - lines[line_number] = replace_str - edited_content = str_array_to_str(lines) - with open( - file, - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write(edited_content) - - return has_match - - -def append_line_in_file(file: str, match_regex: str, append_str: str) -> bool: - with open( - file, - "r+", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - file_content = reader.read() - lines = file_content.splitlines() - has_match = False - copy_lines = lines.copy() - appended_line_index = 0 - for line_number, line in enumerate(lines): - if re.match(match_regex, line.strip()): - has_match = True - - if line_number + 1 < len(lines): - copy_lines[appended_line_index + 1] = append_str - # Since line is added after matched string, shift index start with line_number+1 - # increment of appended_line_index is 2 since copy_lines appended_line_index+1 includes - # append_str - lines_to_be_shifted = lines[line_number + 1 :] - copy_lines = ( - copy_lines[0 : appended_line_index + 2] + lines_to_be_shifted - ) - else: - copy_lines.append(append_str) - appended_line_index = appended_line_index + 1 - edited_content = str_array_to_str(copy_lines) - with open( - file, - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write(edited_content) - - return has_match - - -def prepend_line_in_file(file: str, match_regex: str, append_str: str) -> bool: - with open( - file, - "r+", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - file_content = reader.read() - lines = file_content.splitlines() - has_match = False - copy_lines = 
lines.copy() - prepended_line_index = 0 - for line_number, line in enumerate(lines): - if re.match(match_regex, line.strip()): - has_match = True - copy_lines[prepended_line_index] = append_str - # Since line is added before matched string shift index start with line_number - # increment of prepend_line_index is 1 line after prepended_line_index should be shifted - lines_to_be_shifted = lines[line_number:] - copy_lines = ( - copy_lines[0 : prepended_line_index + 1] + lines_to_be_shifted - ) - prepended_line_index = prepended_line_index + 1 - edited_content = str_array_to_str(copy_lines) - with open( - file, - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write(edited_content) - - return has_match - - -def is_tag_on_branch(tag_name: str, branch_name: str): - g = git.Git(os.getcwd()) - try: - branches_str = g.execute(["git", "branch", "--contains", f"tags/{tag_name}"]) - branches = branches_str.split("\n") - print("Branches str:" + branches_str) - if len(branches) > 0: - for branch in branches: - if remove_prefix(branch, "*").strip() == branch_name: - return True - return False - except GitCommandError as e: - print("Error:" + str(e)) - return False - - -def get_current_branch(working_dir: str) -> str: - repo = get_new_repo(working_dir) - return repo.active_branch.name - - -def remote_branch_exists(branch_name: str, working_dir: str) -> bool: - repo = get_new_repo(working_dir) - for rp in repo.references: - if rp.name.endswith(f"/{branch_name}"): - return True - return False - - -def local_branch_exists(branch_name: str, working_dir: str) -> bool: - repo = get_new_repo(working_dir) - for rp in repo.branches: - if rp.name == branch_name: - return True - return False - - -def branch_exists(branch_name: str, working_dir: str) -> bool: - return local_branch_exists(branch_name, working_dir) or remote_branch_exists( - branch_name, working_dir - ) - - -def remove_cloned_code(exec_path: str): - release_all_repos() - if os.path.exists(f"{exec_path}"): - print(f"Deleting cloned code {exec_path} ...") - # https://stackoverflow.com/questions/51819472/git-cant-delete-local-branch-operation-not-permitted - # https://askubuntu.com/questions/1049142/cannot-delete-git-directory - # since git directory is readonly first we need to give write permission to delete git directory - if os.path.exists(f"{exec_path}/.git"): - run(f"chmod -R 777 {exec_path}/.git") - try: - run(f"rm -rf {exec_path}") - print("Done. Code deleted successfully.") - except subprocess.CalledProcessError: - print( - f"Some files could not be deleted in directory {exec_path}. " - f"Please delete them manually or they will be deleted before next execution" - ) - - -def process_template_file_with_minor( - project_version: str, - templates_path: str, - template_file_path: str, - minor_version: str, - postgres_version: str = "", -): - """This function gets the template files, changes tha parameters inside the file and returns the output. - Template files are stored under packaging_automation/templates and these files include parametric items in the - format of {{parameter_name}}. This function is used while creating docker files and pgxn files which include - "project_name" as parameter. Example usage is in "test_common_tool_methods/test_process_template_file". 
- Jinja2 is used as th the template engine and render function gets the file change parameters in the file - with the given input parameters and returns the output.""" - env = get_template_environment(templates_path) - template = env.get_template(template_file_path) - rendered_output = template.render( - project_version=project_version, - postgres_version=postgres_version, - project_minor_version=minor_version, - ) - return f"{rendered_output}\n" - - -def process_template_file( - project_version: str, - templates_path: str, - template_file_path: str, - postgres_version: str = "", -): - minor_version = get_minor_project_version(project_version) - return process_template_file_with_minor( - project_version, - templates_path, - template_file_path, - minor_version, - postgres_version, - ) - - -def write_to_file(content: str, dest_file_name: str): - with open( - dest_file_name, - "w+", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write(content) - - -def get_gpg_fingerprints_by_name(name: str) -> List[str]: - """Returns GPG fingerprint by its unique key name. We use this function to determine the fingerprint that - we should use when signing packages""" - result = subprocess.run( - shlex.split("gpg --list-keys"), check=True, stdout=subprocess.PIPE - ) - lines = result.stdout.decode("ascii").splitlines() - finger_prints = [] - previous_line = "" - for line in lines: - if line.startswith("uid") and name in line: - finger_prints.append(previous_line.strip()) - continue - previous_line = line - return finger_prints - - -def delete_gpg_key_by_name(name: str, key_type: GpgKeyType): - keys = get_gpg_fingerprints_by_name(name) - - # There could be more than one key with the same name. For statement is used to delete all the public keys - # until no key remains (i.e. key_id is empty). - # Public and private keys are stored with the same fingerprint. In some cases one of them may not be exist. - # Therefore non-existence case is possible - for key_id in keys: - if key_type == GpgKeyType.public: - delete_command = f"gpg --batch --yes --delete-key {key_id}" - elif key_type == GpgKeyType.private: - delete_command = f"gpg --batch --yes --delete-secret-key {key_id}" - else: - raise ValueError("Unsupported Gpg key type") - output = run_with_output(delete_command) - if output.returncode == 0: - print(f"{key_type.name.capitalize()} key with the id {key_id} deleted") - elif output.returncode == 2: - # Key does not exist in keyring - continue - else: - print(f"Error {output.stderr.decode('ascii')}") - break - - -def delete_public_gpg_key_by_name(name: str): - delete_gpg_key_by_name(name, GpgKeyType.public) - - -def delete_private_gpg_key_by_name(name: str): - delete_gpg_key_by_name(name, GpgKeyType.private) - - -def delete_all_gpg_keys_by_name(name: str): - delete_private_gpg_key_by_name(name) - delete_public_gpg_key_by_name(name) - - -def get_private_key_by_fingerprint_without_passphrase(fingerprint: str) -> str: - gpg = gnupg.GPG() - - private_key = gpg.export_keys(fingerprint, secret=True, expect_passphrase=False) - if not private_key: - raise ValueError( - "Error while getting key. Most probably packaging key is stored with passphrase. 
" - "Please check the passphrase and try again" - ) - return private_key - - -def get_private_key_by_fingerprint_with_passphrase( - fingerprint: str, passphrase: str -) -> str: - gpg = gnupg.GPG() - - private_key = gpg.export_keys(fingerprint, secret=True, passphrase=passphrase) - if not private_key: - raise ValueError( - "Error while getting key. Most probably packaging key is stored with passphrase. " - "Please check the passphrase and try again" - ) - return private_key - - -def transform_key_into_base64_str(key: str) -> str: - # while signing packages base64 encoded string is required. So first we encode key with ascii and create a - # byte array than encode it with base64 and decode it with ascii to get the required output - return base64.b64encode(key.encode("ascii")).decode("ascii") - - -def define_rpm_public_key_to_machine(fingerprint: str): - with open( - "rpm_public.key", - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - subprocess.run( - shlex.split(f"gpg --export -a {fingerprint}"), stdout=writer, check=True - ) - run("rpm --import rpm_public.key") - os.remove("rpm_public.key") - - -def delete_rpm_key_by_name(summary: str): - rpm_keys = get_rpm_keys() - for key in rpm_keys: - if rpm_key_matches_summary(key, summary): - run(f"rpm -e {key}") - print(f"RPM key with id {key} was deleted") - - -def get_rpm_keys(): - result = run_with_output("rpm -q gpg-pubkey") - if result.stderr: - raise ValueError(f"Error:{result.stderr.decode('ascii')}") - output = result.stdout.decode("ascii") - key_lines = output.splitlines() - return key_lines - - -def rpm_key_matches_summary(key: str, summary: str): - result = run_with_output("rpm -q " + key + " --qf '%{SUMMARY}'") - if result.stderr: - raise ValueError(f"Error:{result.stderr.decode('ascii')}") - output = result.stdout.decode("ascii") - return summary in output - - -def is_rpm_file_signed(file_path: str) -> bool: - result = run_with_output(f"rpm -K {file_path}") - return result.returncode == 0 - - -def verify_rpm_signature_in_dir(rpm_dir_path: str): - files = [] - for dirpath, _, filenames in os.walk(rpm_dir_path): - files += [os.path.join(dirpath, file) for file in filenames] - rpm_files = filter(lambda file_name: file_name.endswith("rpm"), files) - for file in rpm_files: - if not is_rpm_file_signed(f"{file}"): - raise ValueError( - f"File {file} is not signed or there is a signature check problem" - ) - - -def remove_prefix(text, prefix): - if text.startswith(prefix): - result_str = text[len(prefix) :] - else: - result_str = text - return result_str - - -def remove_suffix(initial_str: str, suffix: str) -> str: - if initial_str.endswith(suffix): - result_str = initial_str[: -len(suffix)] - else: - result_str = initial_str - return result_str - - -def initialize_env(exec_path: str, project_name: str, checkout_dir: str): - remove_cloned_code(f"{exec_path}/{checkout_dir}") - if not os.path.exists(checkout_dir): - run(f"git clone https://github.com/citusdata/{project_name}.git {checkout_dir}") - - -def create_pr( - gh_token: str, - pr_branch: str, - pr_title: str, - repo_owner: str, - project_name: str, - base_branch: str, -): - g = Github(gh_token) - repository = g.get_repo(f"{repo_owner}/{project_name}") - create_pr_with_repo( - repo=repository, pr_branch=pr_branch, pr_title=pr_title, base_branch=base_branch - ) - - -def create_pr_with_repo( - repo: Repository, pr_branch: str, pr_title: str, base_branch: str -): - return repo.create_pull(title=pr_title, base=base_branch, 
head=pr_branch, body="") - - -def stat_get_request(request_address: str, request_type: RequestType, session): - request_log = RequestLog(request_time=datetime.now(), request_type=request_type) - session.add(request_log) - session.commit() - try: - result = requests.get(request_address, timeout=60) - request_log.status_code = result.status_code - request_log.response = result.content.decode("ascii") - except requests.exceptions.RequestException as e: - result = e.response - request_log.status_code = -1 - request_log.response = ( - e.response.content.decode("ascii") - if e.response.content.decode("ascii") - else str(e) - ) - finally: - session.commit() - return result - - -def get_supported_postgres_release_versions( - postgres_matrix_conf_file_path: str, package_version: is_version(str) -) -> List[str]: - with open( - postgres_matrix_conf_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - yaml_content = yaml.load(reader, yaml.BaseLoader) - - versions_dictionary = {} - for version_info in yaml_content["version_matrix"]: - versions_dictionary[list(version_info.keys())[0]] = version_info[ - list(version_info.keys())[0] - ]["postgres_versions"] - release_versions = match_release_version(versions_dictionary, package_version) - - return release_versions - - -def get_supported_postgres_nightly_versions( - postgres_matrix_conf_file_path: str, -) -> List[str]: - with open( - postgres_matrix_conf_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - yaml_content = yaml.load(reader, yaml.BaseLoader) - - # nightly version is the last element in the postgres matrix - latest_version_info = yaml_content["version_matrix"][-1] - nightly_versions = latest_version_info[list(latest_version_info.keys())[0]][ - "postgres_versions" - ] - return nightly_versions - - -def match_release_version(versions_dictionary, package_version: str): - versions = list(versions_dictionary.keys()) - numeric_versions_of_config: Dict[int, str] = {} - for version in versions: - numeric_versions_of_config[ - get_numeric_counterpart_of_version(version) - ] = version - package_version_numeric = get_numeric_counterpart_of_version(package_version) - - if package_version_numeric in numeric_versions_of_config: - version_in_str = numeric_versions_of_config[package_version_numeric] - else: - last_smallest_version = -1 - for numeric_version in numeric_versions_of_config: - if numeric_version > package_version_numeric: - break - - last_smallest_version = numeric_version - - if last_smallest_version < 0: - version_in_str = versions[0] - else: - version_in_str = numeric_versions_of_config[last_smallest_version] - - return versions_dictionary[version_in_str] - - -def get_numeric_counterpart_of_version(package_version: str): - numbers_in_version = package_version.split(".") - # add a 0 if version is minor to calculate and match for patch releases accurately - if len(numbers_in_version) == 2: - numbers_in_version.append("0") - multiplier = 1 - numeric_counterpart = 0 - for num in reversed(numbers_in_version): - numeric_counterpart = numeric_counterpart + int(num) * multiplier - multiplier = multiplier * 100 - return numeric_counterpart +import base64 +import os +import re +import shlex +import subprocess +from datetime import datetime +from enum import Enum +from typing import Dict, List, Tuple + +import git +import gnupg +import pathlib2 +import requests +import yaml +from git import GitCommandError, Repo +from 
github import Commit, Github, PullRequest, Repository +from jinja2 import Environment, FileSystemLoader +from parameters_validation import validate_parameters + +from .common_validations import is_tag, is_version +from .dbconfig import RequestLog, RequestType + +BASE_GIT_PATH = pathlib2.Path(__file__).parents[1] +PATCH_VERSION_MATCH_FROM_MINOR_SUFFIX = r"\.\d{1,3}" +POSTGRES_MATRIX_FLIE_NAME = "postgres-matrix.yml" +PATCH_VERSION_MATCH_FROM_MINOR_SUFFIX = r"\.\d{1,3}" + +# http://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html +# https://bleepcoder.com/pylint/698183789/pep-597-require-encoding-kwarg-in-open-call-and-other-calls +# Parameterized to fix pylint unspecified-encoding error +DEFAULT_ENCODING_FOR_FILE_HANDLING = "utf8" +DEFAULT_UNICODE_ERROR_HANDLER = "surrogateescape" + +# When using GitPython library Repo objects should be closed to be able to delete cloned sources +# referenced by Repo objects.References are stored in below array to be able to close +# all resources after the code execution. +referenced_repos: List[Repo] = [] + +supported_platforms = { + "debian": ["bookworm", "bullseye", "buster", "stretch", "jessie", "wheezy"], + "almalinux": ["8", "9"], + "el": ["9", "8", "7", "6"], + "ol": ["9", "8", "7"], + "ubuntu": ["focal", "bionic", "trusty", "jammy", "kinetic"], +} + + +def platform_names() -> List[str]: + platforms = [] + for platform_os, platform_releases in supported_platforms.items(): + for platform_release in platform_releases: + platforms.append(f"{platform_os}/{platform_release}") + platforms.append("pgxn") + return platforms + + +def get_new_repo(working_dir: str) -> Repo: + repo = Repo(working_dir) + referenced_repos.append(repo) + return repo + + +def release_all_repos(): + for repo in referenced_repos: + repo.close() + + +class PackageType(Enum): + deb = 1 + rpm = 2 + + +class GpgKeyType(Enum): + private = 1 + public = 2 + + +BASE_PATH = pathlib2.Path(__file__).parents[1] + + +def get_spec_file_name(project_name: str) -> str: + return f"{project_name}.spec" + + +def get_minor_project_version(project_version: str) -> str: + project_version_details = get_version_details(project_version) + return f'{project_version_details["major"]}.{project_version_details["minor"]}' + + +def project_version_contains_release_stage(project_version: str) -> bool: + return "_" in project_version + + +def get_minor_project_version_for_docker(project_version: str) -> str: + project_version_details = get_version_details(project_version) + minor_version = ( + f'{project_version_details["major"]}.{project_version_details["minor"]}' + ) + if project_version_contains_release_stage(project_version): + return f'{project_version_details["stage"]}-{minor_version}' + return minor_version + + +def append_fancy_suffix_to_version(version: str, fancy_release_number: int) -> str: + fancy_suffix = f"-{fancy_release_number}" + return f"{version}{fancy_suffix}" + + +def append_project_name_to_version(project_name: str, version: str) -> str: + return f"{version}.{project_name}" + + +def get_project_version_from_tag_name(tag_name: is_tag(str)) -> str: + return tag_name[1:] + + +def get_template_environment(template_dir: str) -> Environment: + file_loader = FileSystemLoader(template_dir) + env = Environment(loader=file_loader) + return env + + +def find_nth_occurrence_position(subject_string: str, search_string: str, n) -> int: + start = subject_string.find(search_string) + + while start >= 0 and n > 1: + start = subject_string.find(search_string, start + 1) + n -= 1 + 
return start + + +def find_nth_matching_line_and_line_number( + subject_string: str, regex_pattern: str, n: int +) -> Tuple[int, str]: + """Takes a subject string, regex param and the search index as parameter and returns line number of found match. + If not found returns -1""" + lines = subject_string.splitlines() + counter = 0 + for line_number, line in enumerate(lines): + if re.match(regex_pattern, line): + counter = counter + 1 + if counter == n: + return line_number, lines[line_number] + return -1, "" + + +def remove_text_with_parenthesis(param: str) -> str: + """Removes texts within parenthesis i.e. outside parenthesis(inside parenthesis)-> outside parenthesis""" + return re.sub(r"[(\[].*?[)\]]", "", param) + + +def run(command, *args, **kwargs): + result = subprocess.run(shlex.split(command), *args, check=True, **kwargs) + return result + + +def run_with_output(command, *args, **kwargs): + # this method's main objective is to return output. Therefore it is caller's responsibility to handle + # success status + # pylint: disable=subprocess-run-check + result = subprocess.run(shlex.split(command), *args, capture_output=True, **kwargs) + return result + + +def cherry_pick_prs(prs: List[PullRequest.PullRequest]): + for pr in prs: + commits = pr.get_commits() + for single_commit in commits: + if not is_merge_commit(single_commit): + cp_result = run(f"git cherry-pick -x {single_commit.commit.sha}") + print( + f"Cherry pick result for PR no {pr.number} and commit sha {single_commit.commit.sha}: {cp_result}" + ) + + +def get_minor_version(version: str) -> str: + project_version_details = get_version_details(version) + return f'{project_version_details["major"]}.{project_version_details["minor"]}' + + +@validate_parameters +def get_patch_version_regex(version: is_version(str)): + return rf"^{re.escape(get_minor_version(version))}{PATCH_VERSION_MATCH_FROM_MINOR_SUFFIX}$" + + +def is_merge_commit(commit: Commit): + return len(commit.parents) <= 1 + + +@validate_parameters +def get_version_details(version: is_version(str)) -> Dict[str, str]: + version_parts = version.split(".") + release_stage = "stable" + assert len(version_parts) == 3 + if project_version_contains_release_stage(version): + stage_parts = version_parts[2].split("_") + release_stage = stage_parts[1] + return { + "major": version_parts[0], + "minor": version_parts[1], + "patch": version_parts[2], + "stage": release_stage, + } + + +@validate_parameters +def get_upcoming_patch_version(version: is_version(str)) -> str: + project_version_details = get_version_details(version) + return f'{get_upcoming_minor_version(version)}.{int(project_version_details["patch"]) + 1}' + + +@validate_parameters +def get_upcoming_minor_version(version: is_version(str)) -> str: + project_version_details = get_version_details(version) + return f'{project_version_details["major"]}.{int(project_version_details["minor"]) + 1}' + + +def get_last_commit_message(path: str) -> str: + repo = get_new_repo(path) + commit = repo.head.commit + return commit.message + + +@validate_parameters +def is_major_release(version: is_version(str)) -> bool: + version_info = get_version_details(version) + return version_info["patch"] == "0" + + +def str_array_to_str(str_array: List[str]) -> str: + return f"{os.linesep.join(str_array)}{os.linesep}" + + +def get_prs_for_patch_release( + repo: Repository.Repository, + earliest_date: datetime, + base_branch: str, + last_date: datetime = None, +): + pull_requests = repo.get_pulls( + state="closed", base=base_branch, sort="created", 
direction="desc" + ) + + # filter pull requests according to given time interval + filtered_pull_requests = [] + for pull_request in pull_requests: + # FIXME: We hit to API rate limit when using `.merged`, so we use `.merged_at` here + if not pull_request.merged_at: + continue + if pull_request.merged_at < earliest_date: + continue + if last_date and pull_request.merged_at > last_date: + continue + + filtered_pull_requests.append(pull_request) + + # finally, sort the pr's by their merge date + sorted_pull_requests = sorted(filtered_pull_requests, key=lambda p: p.merged_at) + return sorted_pull_requests + + +def filter_prs_by_label(prs: List[PullRequest.PullRequest], label_name: str): + filtered_prs = [] + for pr in prs: + if any(label.name == label_name for label in pr.labels): + filtered_prs.append(pr) + return filtered_prs + + +def file_includes_line( + base_path: str, relative_file_path: str, line_content: str +) -> bool: + with open( + f"{base_path}/{relative_file_path}", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + for line in lines: + if line == line_content: + return True + return False + + +def count_line_in_file( + base_path: str, relative_file_path: str, search_line: str +) -> int: + with open( + f"{base_path}/{relative_file_path}", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + return len(list(filter(lambda line: line == search_line, lines))) + + +def replace_line_in_file(file: str, match_regex: str, replace_str: str) -> bool: + with open( + file, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + file_content = reader.read() + lines = file_content.splitlines() + has_match = False + for line_number, line in enumerate(lines): + if re.match(match_regex, line.strip()): + has_match = True + lines[line_number] = replace_str + edited_content = str_array_to_str(lines) + with open( + file, + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write(edited_content) + + return has_match + + +def append_line_in_file(file: str, match_regex: str, append_str: str) -> bool: + with open( + file, + "r+", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + file_content = reader.read() + lines = file_content.splitlines() + has_match = False + copy_lines = lines.copy() + appended_line_index = 0 + for line_number, line in enumerate(lines): + if re.match(match_regex, line.strip()): + has_match = True + + if line_number + 1 < len(lines): + copy_lines[appended_line_index + 1] = append_str + # Since line is added after matched string, shift index start with line_number+1 + # increment of appended_line_index is 2 since copy_lines appended_line_index+1 includes + # append_str + lines_to_be_shifted = lines[line_number + 1 :] + copy_lines = ( + copy_lines[0 : appended_line_index + 2] + lines_to_be_shifted + ) + else: + copy_lines.append(append_str) + appended_line_index = appended_line_index + 1 + edited_content = str_array_to_str(copy_lines) + with open( + file, + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write(edited_content) + + return has_match + + +def prepend_line_in_file(file: str, match_regex: str, append_str: str) 
-> bool: + with open( + file, + "r+", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + file_content = reader.read() + lines = file_content.splitlines() + has_match = False + copy_lines = lines.copy() + prepended_line_index = 0 + for line_number, line in enumerate(lines): + if re.match(match_regex, line.strip()): + has_match = True + copy_lines[prepended_line_index] = append_str + # Since line is added before matched string shift index start with line_number + # increment of prepend_line_index is 1 line after prepended_line_index should be shifted + lines_to_be_shifted = lines[line_number:] + copy_lines = ( + copy_lines[0 : prepended_line_index + 1] + lines_to_be_shifted + ) + prepended_line_index = prepended_line_index + 1 + edited_content = str_array_to_str(copy_lines) + with open( + file, + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write(edited_content) + + return has_match + + +def is_tag_on_branch(tag_name: str, branch_name: str): + g = git.Git(os.getcwd()) + try: + branches_str = g.execute(["git", "branch", "--contains", f"tags/{tag_name}"]) + branches = branches_str.split("\n") + print("Branches str:" + branches_str) + if len(branches) > 0: + for branch in branches: + if remove_prefix(branch, "*").strip() == branch_name: + return True + return False + except GitCommandError as e: + print("Error:" + str(e)) + return False + + +def get_current_branch(working_dir: str) -> str: + repo = get_new_repo(working_dir) + return repo.active_branch.name + + +def remote_branch_exists(branch_name: str, working_dir: str) -> bool: + repo = get_new_repo(working_dir) + for rp in repo.references: + if rp.name.endswith(f"/{branch_name}"): + return True + return False + + +def local_branch_exists(branch_name: str, working_dir: str) -> bool: + repo = get_new_repo(working_dir) + for rp in repo.branches: + if rp.name == branch_name: + return True + return False + + +def branch_exists(branch_name: str, working_dir: str) -> bool: + return local_branch_exists(branch_name, working_dir) or remote_branch_exists( + branch_name, working_dir + ) + + +def remove_cloned_code(exec_path: str): + release_all_repos() + if os.path.exists(f"{exec_path}"): + print(f"Deleting cloned code {exec_path} ...") + # https://stackoverflow.com/questions/51819472/git-cant-delete-local-branch-operation-not-permitted + # https://askubuntu.com/questions/1049142/cannot-delete-git-directory + # since git directory is readonly first we need to give write permission to delete git directory + if os.path.exists(f"{exec_path}/.git"): + run(f"chmod -R 777 {exec_path}/.git") + try: + run(f"rm -rf {exec_path}") + print("Done. Code deleted successfully.") + except subprocess.CalledProcessError: + print( + f"Some files could not be deleted in directory {exec_path}. " + f"Please delete them manually or they will be deleted before next execution" + ) + + +def process_template_file_with_minor( + project_version: str, + templates_path: str, + template_file_path: str, + minor_version: str, + postgres_version: str = "", +): + """This function gets the template files, changes tha parameters inside the file and returns the output. + Template files are stored under packaging_automation/templates and these files include parametric items in the + format of {{parameter_name}}. This function is used while creating docker files and pgxn files which include + "project_name" as parameter. 
Example usage is in "test_common_tool_methods/test_process_template_file". + Jinja2 is used as th the template engine and render function gets the file change parameters in the file + with the given input parameters and returns the output.""" + env = get_template_environment(templates_path) + template = env.get_template(template_file_path) + rendered_output = template.render( + project_version=project_version, + postgres_version=postgres_version, + project_minor_version=minor_version, + ) + return f"{rendered_output}\n" + + +def process_template_file( + project_version: str, + templates_path: str, + template_file_path: str, + postgres_version: str = "", +): + minor_version = get_minor_project_version(project_version) + return process_template_file_with_minor( + project_version, + templates_path, + template_file_path, + minor_version, + postgres_version, + ) + + +def write_to_file(content: str, dest_file_name: str): + with open( + dest_file_name, + "w+", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write(content) + + +def get_gpg_fingerprints_by_name(name: str) -> List[str]: + """Returns GPG fingerprint by its unique key name. We use this function to determine the fingerprint that + we should use when signing packages""" + result = subprocess.run( + shlex.split("gpg --list-keys"), check=True, stdout=subprocess.PIPE + ) + lines = result.stdout.decode("ascii").splitlines() + finger_prints = [] + previous_line = "" + for line in lines: + if line.startswith("uid") and name in line: + finger_prints.append(previous_line.strip()) + continue + previous_line = line + return finger_prints + + +def delete_gpg_key_by_name(name: str, key_type: GpgKeyType): + keys = get_gpg_fingerprints_by_name(name) + + # There could be more than one key with the same name. For statement is used to delete all the public keys + # until no key remains (i.e. key_id is empty). + # Public and private keys are stored with the same fingerprint. In some cases one of them may not be exist. + # Therefore non-existence case is possible + for key_id in keys: + if key_type == GpgKeyType.public: + delete_command = f"gpg --batch --yes --delete-key {key_id}" + elif key_type == GpgKeyType.private: + delete_command = f"gpg --batch --yes --delete-secret-key {key_id}" + else: + raise ValueError("Unsupported Gpg key type") + output = run_with_output(delete_command) + if output.returncode == 0: + print(f"{key_type.name.capitalize()} key with the id {key_id} deleted") + elif output.returncode == 2: + # Key does not exist in keyring + continue + else: + print(f"Error {output.stderr.decode('ascii')}") + break + + +def delete_public_gpg_key_by_name(name: str): + delete_gpg_key_by_name(name, GpgKeyType.public) + + +def delete_private_gpg_key_by_name(name: str): + delete_gpg_key_by_name(name, GpgKeyType.private) + + +def delete_all_gpg_keys_by_name(name: str): + delete_private_gpg_key_by_name(name) + delete_public_gpg_key_by_name(name) + + +def get_private_key_by_fingerprint_without_passphrase(fingerprint: str) -> str: + gpg = gnupg.GPG() + + private_key = gpg.export_keys(fingerprint, secret=True, expect_passphrase=False) + if not private_key: + raise ValueError( + "Error while getting key. Most probably packaging key is stored with passphrase. 
" + "Please check the passphrase and try again" + ) + return private_key + + +def get_private_key_by_fingerprint_with_passphrase( + fingerprint: str, passphrase: str +) -> str: + gpg = gnupg.GPG() + + private_key = gpg.export_keys(fingerprint, secret=True, passphrase=passphrase) + if not private_key: + raise ValueError( + "Error while getting key. Most probably packaging key is stored with passphrase. " + "Please check the passphrase and try again" + ) + return private_key + + +def transform_key_into_base64_str(key: str) -> str: + # while signing packages base64 encoded string is required. So first we encode key with ascii and create a + # byte array than encode it with base64 and decode it with ascii to get the required output + return base64.b64encode(key.encode("ascii")).decode("ascii") + + +def define_rpm_public_key_to_machine(fingerprint: str): + with open( + "rpm_public.key", + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + subprocess.run( + shlex.split(f"gpg --export -a {fingerprint}"), stdout=writer, check=True + ) + run("rpm --import rpm_public.key") + os.remove("rpm_public.key") + + +def delete_rpm_key_by_name(summary: str): + rpm_keys = get_rpm_keys() + for key in rpm_keys: + if rpm_key_matches_summary(key, summary): + run(f"rpm -e {key}") + print(f"RPM key with id {key} was deleted") + + +def get_rpm_keys(): + result = run_with_output("rpm -q gpg-pubkey") + if result.stderr: + raise ValueError(f"Error:{result.stderr.decode('ascii')}") + output = result.stdout.decode("ascii") + key_lines = output.splitlines() + return key_lines + + +def rpm_key_matches_summary(key: str, summary: str): + result = run_with_output("rpm -q " + key + " --qf '%{SUMMARY}'") + if result.stderr: + raise ValueError(f"Error:{result.stderr.decode('ascii')}") + output = result.stdout.decode("ascii") + return summary in output + + +def is_rpm_file_signed(file_path: str) -> bool: + result = run_with_output(f"rpm -K {file_path}") + return result.returncode == 0 + + +def verify_rpm_signature_in_dir(rpm_dir_path: str): + files = [] + for dirpath, _, filenames in os.walk(rpm_dir_path): + files += [os.path.join(dirpath, file) for file in filenames] + rpm_files = filter(lambda file_name: file_name.endswith("rpm"), files) + for file in rpm_files: + if not is_rpm_file_signed(f"{file}"): + raise ValueError( + f"File {file} is not signed or there is a signature check problem" + ) + + +def remove_prefix(text, prefix): + if text.startswith(prefix): + result_str = text[len(prefix) :] + else: + result_str = text + return result_str + + +def remove_suffix(initial_str: str, suffix: str) -> str: + if initial_str.endswith(suffix): + result_str = initial_str[: -len(suffix)] + else: + result_str = initial_str + return result_str + + +def initialize_env(exec_path: str, project_name: str, checkout_dir: str): + remove_cloned_code(f"{exec_path}/{checkout_dir}") + if not os.path.exists(checkout_dir): + run(f"git clone https://github.com/citusdata/{project_name}.git {checkout_dir}") + + +def create_pr( + gh_token: str, + pr_branch: str, + pr_title: str, + repo_owner: str, + project_name: str, + base_branch: str, +): + g = Github(gh_token) + repository = g.get_repo(f"{repo_owner}/{project_name}") + create_pr_with_repo( + repo=repository, pr_branch=pr_branch, pr_title=pr_title, base_branch=base_branch + ) + + +def create_pr_with_repo( + repo: Repository, pr_branch: str, pr_title: str, base_branch: str +): + return repo.create_pull(title=pr_title, base=base_branch, 
head=pr_branch, body="") + + +def stat_get_request(request_address: str, request_type: RequestType, session): + request_log = RequestLog(request_time=datetime.now(), request_type=request_type) + session.add(request_log) + session.commit() + try: + result = requests.get(request_address, timeout=60) + request_log.status_code = result.status_code + request_log.response = result.content.decode("ascii") + except requests.exceptions.RequestException as e: + result = e.response + request_log.status_code = -1 + request_log.response = ( + e.response.content.decode("ascii") + if e.response.content.decode("ascii") + else str(e) + ) + finally: + session.commit() + return result + + +def get_supported_postgres_release_versions( + postgres_matrix_conf_file_path: str, package_version: is_version(str) +) -> List[str]: + with open( + postgres_matrix_conf_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + yaml_content = yaml.load(reader, yaml.BaseLoader) + + versions_dictionary = {} + for version_info in yaml_content["version_matrix"]: + versions_dictionary[list(version_info.keys())[0]] = version_info[ + list(version_info.keys())[0] + ]["postgres_versions"] + release_versions = match_release_version(versions_dictionary, package_version) + + return release_versions + + +def get_supported_postgres_nightly_versions( + postgres_matrix_conf_file_path: str, +) -> List[str]: + with open( + postgres_matrix_conf_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + yaml_content = yaml.load(reader, yaml.BaseLoader) + + # nightly version is the last element in the postgres matrix + latest_version_info = yaml_content["version_matrix"][-1] + nightly_versions = latest_version_info[list(latest_version_info.keys())[0]][ + "postgres_versions" + ] + return nightly_versions + + +def match_release_version(versions_dictionary, package_version: str): + versions = list(versions_dictionary.keys()) + numeric_versions_of_config: Dict[int, str] = {} + for version in versions: + numeric_versions_of_config[get_numeric_counterpart_of_version(version)] = ( + version + ) + package_version_numeric = get_numeric_counterpart_of_version(package_version) + + if package_version_numeric in numeric_versions_of_config: + version_in_str = numeric_versions_of_config[package_version_numeric] + else: + last_smallest_version = -1 + for numeric_version in numeric_versions_of_config: + if numeric_version > package_version_numeric: + break + + last_smallest_version = numeric_version + + if last_smallest_version < 0: + version_in_str = versions[0] + else: + version_in_str = numeric_versions_of_config[last_smallest_version] + + return versions_dictionary[version_in_str] + + +def get_numeric_counterpart_of_version(package_version: str): + numbers_in_version = package_version.split(".") + # add a 0 if version is minor to calculate and match for patch releases accurately + if len(numbers_in_version) == 2: + numbers_in_version.append("0") + multiplier = 1 + numeric_counterpart = 0 + for num in reversed(numbers_in_version): + numeric_counterpart = numeric_counterpart + int(num) * multiplier + multiplier = multiplier * 100 + return numeric_counterpart diff --git a/packaging_automation/common_validations.py b/packaging_automation/common_validations.py index 85c249ce..02c7ae73 100644 --- a/packaging_automation/common_validations.py +++ b/packaging_automation/common_validations.py @@ -1,33 +1,33 @@ -import re - -import 
string_utils -from parameters_validation import parameter_validation - -CITUS_MINOR_VERSION_PATTERN = r"\d{1,2}\.\d{1,2}" -CITUS_PATCH_VERSION_PATTERN = CITUS_MINOR_VERSION_PATTERN + r"\.\d{1,2}" - - -@parameter_validation -def is_version(version: str): - if not version: - raise ValueError("version should be non-empty and should not be None") - if not re.match(CITUS_PATCH_VERSION_PATTERN, version): - raise ValueError( - "version should include three level of digits separated by dots, e.g: 10.0.1" - ) - - -@parameter_validation -def is_tag(tag: str): - if not tag: - raise ValueError("tag should be non-empty and should not be None") - if not re.match(f"v{CITUS_PATCH_VERSION_PATTERN}", tag): - raise ValueError( - "tag should start with 'v' and should include three level of digits separated by dots, e.g: v10.0.1" - ) - - -@parameter_validation -def is_email(email: str): - if not string_utils.is_email(email): - raise ValueError("Parameter is not in email format") +import re + +import string_utils +from parameters_validation import parameter_validation + +CITUS_MINOR_VERSION_PATTERN = r"\d{1,2}\.\d{1,2}" +CITUS_PATCH_VERSION_PATTERN = CITUS_MINOR_VERSION_PATTERN + r"\.\d{1,2}" + + +@parameter_validation +def is_version(version: str): + if not version: + raise ValueError("version should be non-empty and should not be None") + if not re.match(CITUS_PATCH_VERSION_PATTERN, version): + raise ValueError( + "version should include three level of digits separated by dots, e.g: 10.0.1" + ) + + +@parameter_validation +def is_tag(tag: str): + if not tag: + raise ValueError("tag should be non-empty and should not be None") + if not re.match(f"v{CITUS_PATCH_VERSION_PATTERN}", tag): + raise ValueError( + "tag should start with 'v' and should include three level of digits separated by dots, e.g: v10.0.1" + ) + + +@parameter_validation +def is_email(email: str): + if not string_utils.is_email(email): + raise ValueError("Parameter is not in email format") diff --git a/packaging_automation/dbconfig.py b/packaging_automation/dbconfig.py index 8e714c86..3d23dafd 100644 --- a/packaging_automation/dbconfig.py +++ b/packaging_automation/dbconfig.py @@ -1,52 +1,52 @@ -import enum - -import sqlalchemy -from attr import dataclass -from sqlalchemy import Column, INTEGER, TIMESTAMP, TEXT -from sqlalchemy import create_engine -from sqlalchemy.orm import declarative_base -from sqlalchemy.orm import sessionmaker - - -@dataclass -class DbParams: - user_name: str - password: str - host_and_port: str - db_name: str - - -def db_connection_string(db_params: DbParams, is_test=False): - database_name = db_params.db_name if not is_test else f"{db_params.db_name}-test" - return f"postgresql+psycopg2://{db_params.user_name}:{db_params.password}@{db_params.host_and_port}/{database_name}" - - -def db_session(db_params: DbParams, is_test: bool, create_db_objects: bool = True): - db_engine = create_engine( - db_connection_string(db_params=db_params, is_test=is_test) - ) - if create_db_objects: - Base.metadata.create_all(db_engine) - Session = sessionmaker(db_engine) - return Session() - - -Base = declarative_base() - - -class RequestType(enum.Enum): - docker_pull = 1 - github_clone = 2 - package_cloud_list_package = 3 - package_cloud_download_series_query = 4 - package_cloud_detail_query = 5 - homebrew_download = 6 - - -class RequestLog(Base): - __tablename__ = "request_log" - id = Column(INTEGER, primary_key=True, autoincrement=True) - request_time = Column(TIMESTAMP, nullable=False) - request_type = Column(sqlalchemy.Enum(RequestType)) - 
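# The validators above are regex-driven; the self-contained checks below (repeating the two
# patterns from common_validations.py so the snippet stands alone) make the accepted formats
# explicit.
import re

MINOR_PATTERN = r"\d{1,2}\.\d{1,2}"
PATCH_PATTERN = MINOR_PATTERN + r"\.\d{1,2}"

assert re.match(PATCH_PATTERN, "10.0.1")              # three dot-separated numbers pass
assert re.match(PATCH_PATTERN, "10.0") is None        # missing patch digit is rejected
assert re.match(f"v{PATCH_PATTERN}", "v10.0.1")       # tag form requires the leading 'v'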
status_code = Column(INTEGER) - response = Column(TEXT) +import enum + +import sqlalchemy +from attr import dataclass +from sqlalchemy import Column, INTEGER, TIMESTAMP, TEXT +from sqlalchemy import create_engine +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import sessionmaker + + +@dataclass +class DbParams: + user_name: str + password: str + host_and_port: str + db_name: str + + +def db_connection_string(db_params: DbParams, is_test=False): + database_name = db_params.db_name if not is_test else f"{db_params.db_name}-test" + return f"postgresql+psycopg2://{db_params.user_name}:{db_params.password}@{db_params.host_and_port}/{database_name}" + + +def db_session(db_params: DbParams, is_test: bool, create_db_objects: bool = True): + db_engine = create_engine( + db_connection_string(db_params=db_params, is_test=is_test) + ) + if create_db_objects: + Base.metadata.create_all(db_engine) + Session = sessionmaker(db_engine) + return Session() + + +Base = declarative_base() + + +class RequestType(enum.Enum): + docker_pull = 1 + github_clone = 2 + package_cloud_list_package = 3 + package_cloud_download_series_query = 4 + package_cloud_detail_query = 5 + homebrew_download = 6 + + +class RequestLog(Base): + __tablename__ = "request_log" + id = Column(INTEGER, primary_key=True, autoincrement=True) + request_time = Column(TIMESTAMP, nullable=False) + request_type = Column(sqlalchemy.Enum(RequestType)) + status_code = Column(INTEGER) + response = Column(TEXT) diff --git a/packaging_automation/delete_packages_on_packagecloud.py b/packaging_automation/delete_packages_on_packagecloud.py index 210061ff..efd3ff5e 100644 --- a/packaging_automation/delete_packages_on_packagecloud.py +++ b/packaging_automation/delete_packages_on_packagecloud.py @@ -1,69 +1,69 @@ -import requests -import json -from datetime import datetime -import argparse -from enum import Enum - -PAGE_RECORD_COUNT = 100 -PACKAGE_DELETION_DAYS_THRESHOLD = 10 - - -class PackageRepository(Enum): - community_nightlies = "community-nightlies" - enterprise_nightlies = "enterprise-nightlies" - - -def delete_packages(repo: PackageRepository, package_cloud_api_token: str) -> None: - url_prefix = f"https://{package_cloud_api_token}:@packagecloud.io" - - successful_count = 0 - error_count = 0 - end_of_limits_reached = False - while True: - list_url = ( - f"{url_prefix}/api/v1/repos/citusdata/{repo.value}" - f"/packages.json?per_page={PAGE_RECORD_COUNT}&page=0" - ) - result = requests.get(list_url, timeout=60) - package_info_list = json.loads(result.content) - if len(package_info_list) == 0 or end_of_limits_reached: - break - for package_info in package_info_list: - package_upload_date = datetime.strptime( - package_info["created_at"], "%Y-%m-%dT%H:%M:%S.000Z" - ) - diff = datetime.now() - package_upload_date - if diff.days > PACKAGE_DELETION_DAYS_THRESHOLD: - delete_url = f"{url_prefix}{package_info['destroy_url']}" - - del_result = requests.delete(delete_url, timeout=60) - if del_result.status_code == 200: - print(f"{package_info['filename']} deleted successfully") - successful_count = successful_count + 1 - else: - error_count = error_count + 1 - print( - f"{package_info['filename']} could not be deleted. 
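# A minimal sketch of how the statistics collectors later in this patch wire the dbconfig
# helpers above together; the credentials and database name are placeholders, and with
# is_test=True the connection string targets "<db_name>-test" instead of the production
# database.
#
#   from datetime import datetime
#   params = DbParams(user_name="stats", password="secret",
#                     host_and_port="localhost:5432", db_name="citus_stats")
#   session = db_session(db_params=params, is_test=True)  # also creates missing tables
#   session.add(RequestLog(request_time=datetime.now(),
#                          request_type=RequestType.homebrew_download))
#   session.commit()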
Error Code:{del_result.status_code} " - f"Error message:{del_result.content}" - ) - else: - # no more packages older than PACKAGE_DELETION_DAYS_THRESHOLD - end_of_limits_reached = True - - print("Deletion Stats:") - print(f"Succesful Count: {successful_count}") - print(f"Error Count:{error_count}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--package_repo", choices=[r.value for r in PackageRepository], required=True - ) - parser.add_argument("--package_cloud_api_token", required=True) - args = parser.parse_args() - - delete_packages( - repo=PackageRepository(args.package_repo), - package_cloud_api_token=args.package_cloud_api_token, - ) +import requests +import json +from datetime import datetime +import argparse +from enum import Enum + +PAGE_RECORD_COUNT = 100 +PACKAGE_DELETION_DAYS_THRESHOLD = 10 + + +class PackageRepository(Enum): + community_nightlies = "community-nightlies" + enterprise_nightlies = "enterprise-nightlies" + + +def delete_packages(repo: PackageRepository, package_cloud_api_token: str) -> None: + url_prefix = f"https://{package_cloud_api_token}:@packagecloud.io" + + successful_count = 0 + error_count = 0 + end_of_limits_reached = False + while True: + list_url = ( + f"{url_prefix}/api/v1/repos/citusdata/{repo.value}" + f"/packages.json?per_page={PAGE_RECORD_COUNT}&page=0" + ) + result = requests.get(list_url, timeout=60) + package_info_list = json.loads(result.content) + if len(package_info_list) == 0 or end_of_limits_reached: + break + for package_info in package_info_list: + package_upload_date = datetime.strptime( + package_info["created_at"], "%Y-%m-%dT%H:%M:%S.000Z" + ) + diff = datetime.now() - package_upload_date + if diff.days > PACKAGE_DELETION_DAYS_THRESHOLD: + delete_url = f"{url_prefix}{package_info['destroy_url']}" + + del_result = requests.delete(delete_url, timeout=60) + if del_result.status_code == 200: + print(f"{package_info['filename']} deleted successfully") + successful_count = successful_count + 1 + else: + error_count = error_count + 1 + print( + f"{package_info['filename']} could not be deleted. 
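# The cleanup loop above removes a nightly package only once it is older than
# PACKAGE_DELETION_DAYS_THRESHOLD (10) days; a self-contained check of that date arithmetic,
# with made-up timestamps in the packagecloud "created_at" format:
from datetime import datetime

uploaded = datetime.strptime("2024-04-10T12:00:00.000Z", "%Y-%m-%dT%H:%M:%S.000Z")
age = datetime(2024, 4, 29) - uploaded
assert age.days == 18 and age.days > 10   # old enough, so this package would be deleted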
Error Code:{del_result.status_code} " + f"Error message:{del_result.content}" + ) + else: + # no more packages older than PACKAGE_DELETION_DAYS_THRESHOLD + end_of_limits_reached = True + + print("Deletion Stats:") + print(f"Succesful Count: {successful_count}") + print(f"Error Count:{error_count}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--package_repo", choices=[r.value for r in PackageRepository], required=True + ) + parser.add_argument("--package_cloud_api_token", required=True) + args = parser.parse_args() + + delete_packages( + repo=PackageRepository(args.package_repo), + package_cloud_api_token=args.package_cloud_api_token, + ) diff --git a/packaging_automation/docker_statistics_collector.py b/packaging_automation/docker_statistics_collector.py index 31654b6b..e66cf669 100644 --- a/packaging_automation/docker_statistics_collector.py +++ b/packaging_automation/docker_statistics_collector.py @@ -1,128 +1,128 @@ -import argparse -import sys -from datetime import datetime, timedelta - -import requests -from sqlalchemy import Column, DATE, INTEGER, TIMESTAMP, desc - -from .common_tool_methods import str_array_to_str -from .dbconfig import Base, DbParams, db_session - - -class DockerStats(Base): - __tablename__ = "docker_stats" - id = Column(INTEGER, primary_key=True, autoincrement=True) - fetch_date = Column(TIMESTAMP) - stat_date = Column(DATE, unique=True) - total_pull_count = Column(INTEGER) - daily_pull_count = Column(INTEGER) - - -docker_repositories = ["citus", "membership-manager"] - - -def fetch_and_store_docker_statistics( - repository_name: str, - db_parameters: DbParams, - is_test: bool = False, - test_day_shift_index: int = 0, - test_total_pull_count: int = 0, -): - if repository_name not in docker_repositories: - raise ValueError( - f"Repository name should be in {str_array_to_str(docker_repositories)}" - ) - if not is_test and (test_day_shift_index != 0 or test_total_pull_count != 0): - raise ValueError( - "test_day_shift_index and test_total_pull_count parameters are test " - "parameters. Please don't use these parameters other than testing." 
- ) - - result = requests.get( - f"https://hub.docker.com/v2/repositories/citusdata/{repository_name}/", - timeout=60, - ) - total_pull_count = ( - int(result.json()["pull_count"]) - if test_total_pull_count == 0 - else test_total_pull_count - ) - - session = db_session( - db_params=db_parameters, is_test=is_test, create_db_objects=True - ) - - fetch_date = datetime.now() + timedelta(days=test_day_shift_index) - validate_same_day_record_existence(fetch_date, session) - day_diff, mod_pull_diff, pull_diff = calculate_diff_params( - fetch_date, session, total_pull_count - ) - for i in range(0, day_diff): - daily_pull_count = ( - (pull_diff - mod_pull_diff) / day_diff - if i > 0 - else (pull_diff - mod_pull_diff) / day_diff + mod_pull_diff - ) - stat_param = DockerStats( - fetch_date=fetch_date, - total_pull_count=total_pull_count, - daily_pull_count=daily_pull_count, - stat_date=fetch_date.date() - timedelta(days=i), - ) - session.add(stat_param) - session.commit() - - -def calculate_diff_params(fetch_date, session, total_pull_count): - last_stat_record = ( - session.query(DockerStats).order_by(desc(DockerStats.stat_date)).first() - ) - day_diff = ( - (fetch_date.date() - last_stat_record.stat_date).days if last_stat_record else 1 - ) - pull_diff = ( - total_pull_count - last_stat_record.total_pull_count - if last_stat_record - else total_pull_count - ) - mod_pull_diff = pull_diff % day_diff - return day_diff, mod_pull_diff, pull_diff - - -def validate_same_day_record_existence(fetch_date, session): - same_day_record = ( - session.query(DockerStats).filter_by(stat_date=fetch_date.date()).first() - ) - if same_day_record: - print( - f"Docker download record for date {fetch_date.date()} already exists. No need to add record." - ) - sys.exit(0) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--repo_name", choices=docker_repositories) - parser.add_argument("--db_user_name", required=True) - parser.add_argument("--db_password", required=True) - parser.add_argument("--db_host_and_port", required=True) - parser.add_argument("--db_name", required=True) - parser.add_argument("--is_test", action="store_true") - parser.add_argument("--test_day_shift_index", nargs="?", default=0) - parser.add_argument("--test_total_pull_count", nargs="?", default=0) - - arguments = parser.parse_args() - db_params = DbParams( - user_name=arguments.db_user_name, - password=arguments.db_password, - host_and_port=arguments.db_host_and_port, - db_name=arguments.db_name, - ) - - fetch_and_store_docker_statistics( - repository_name=arguments.repo_name, - is_test=arguments.is_test, - db_parameters=db_params, - test_day_shift_index=int(arguments.test_day_shift_index), - test_total_pull_count=int(arguments.test_total_pull_count), - ) +import argparse +import sys +from datetime import datetime, timedelta + +import requests +from sqlalchemy import Column, DATE, INTEGER, TIMESTAMP, desc + +from .common_tool_methods import str_array_to_str +from .dbconfig import Base, DbParams, db_session + + +class DockerStats(Base): + __tablename__ = "docker_stats" + id = Column(INTEGER, primary_key=True, autoincrement=True) + fetch_date = Column(TIMESTAMP) + stat_date = Column(DATE, unique=True) + total_pull_count = Column(INTEGER) + daily_pull_count = Column(INTEGER) + + +docker_repositories = ["citus", "membership-manager"] + + +def fetch_and_store_docker_statistics( + repository_name: str, + db_parameters: DbParams, + is_test: bool = False, + test_day_shift_index: int = 0, + test_total_pull_count: int 
= 0, +): + if repository_name not in docker_repositories: + raise ValueError( + f"Repository name should be in {str_array_to_str(docker_repositories)}" + ) + if not is_test and (test_day_shift_index != 0 or test_total_pull_count != 0): + raise ValueError( + "test_day_shift_index and test_total_pull_count parameters are test " + "parameters. Please don't use these parameters other than testing." + ) + + result = requests.get( + f"https://hub.docker.com/v2/repositories/citusdata/{repository_name}/", + timeout=60, + ) + total_pull_count = ( + int(result.json()["pull_count"]) + if test_total_pull_count == 0 + else test_total_pull_count + ) + + session = db_session( + db_params=db_parameters, is_test=is_test, create_db_objects=True + ) + + fetch_date = datetime.now() + timedelta(days=test_day_shift_index) + validate_same_day_record_existence(fetch_date, session) + day_diff, mod_pull_diff, pull_diff = calculate_diff_params( + fetch_date, session, total_pull_count + ) + for i in range(0, day_diff): + daily_pull_count = ( + (pull_diff - mod_pull_diff) / day_diff + if i > 0 + else (pull_diff - mod_pull_diff) / day_diff + mod_pull_diff + ) + stat_param = DockerStats( + fetch_date=fetch_date, + total_pull_count=total_pull_count, + daily_pull_count=daily_pull_count, + stat_date=fetch_date.date() - timedelta(days=i), + ) + session.add(stat_param) + session.commit() + + +def calculate_diff_params(fetch_date, session, total_pull_count): + last_stat_record = ( + session.query(DockerStats).order_by(desc(DockerStats.stat_date)).first() + ) + day_diff = ( + (fetch_date.date() - last_stat_record.stat_date).days if last_stat_record else 1 + ) + pull_diff = ( + total_pull_count - last_stat_record.total_pull_count + if last_stat_record + else total_pull_count + ) + mod_pull_diff = pull_diff % day_diff + return day_diff, mod_pull_diff, pull_diff + + +def validate_same_day_record_existence(fetch_date, session): + same_day_record = ( + session.query(DockerStats).filter_by(stat_date=fetch_date.date()).first() + ) + if same_day_record: + print( + f"Docker download record for date {fetch_date.date()} already exists. No need to add record." 
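# Worked example for the pull-count distribution above: if the last stored stat is from three
# days before the fetch and the total pull count moved from 1000 to 1010, calculate_diff_params
# returns day_diff = 3, pull_diff = 10 and mod_pull_diff = 1, and the loop writes
#
#   i = 0 -> stat_date = fetch date,      daily_pull_count = (10 - 1) / 3 + 1 = 4.0
#   i = 1 -> stat_date = fetch date - 1d, daily_pull_count = (10 - 1) / 3     = 3.0
#   i = 2 -> stat_date = fetch date - 2d, daily_pull_count = (10 - 1) / 3     = 3.0
#
# so the division remainder is attributed to the most recent day and the three rows still sum
# to the 10 new pulls.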
+ ) + sys.exit(0) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--repo_name", choices=docker_repositories) + parser.add_argument("--db_user_name", required=True) + parser.add_argument("--db_password", required=True) + parser.add_argument("--db_host_and_port", required=True) + parser.add_argument("--db_name", required=True) + parser.add_argument("--is_test", action="store_true") + parser.add_argument("--test_day_shift_index", nargs="?", default=0) + parser.add_argument("--test_total_pull_count", nargs="?", default=0) + + arguments = parser.parse_args() + db_params = DbParams( + user_name=arguments.db_user_name, + password=arguments.db_password, + host_and_port=arguments.db_host_and_port, + db_name=arguments.db_name, + ) + + fetch_and_store_docker_statistics( + repository_name=arguments.repo_name, + is_test=arguments.is_test, + db_parameters=db_params, + test_day_shift_index=int(arguments.test_day_shift_index), + test_total_pull_count=int(arguments.test_total_pull_count), + ) diff --git a/packaging_automation/get_postgres_versions.py b/packaging_automation/get_postgres_versions.py index ad2a52f1..5221db79 100644 --- a/packaging_automation/get_postgres_versions.py +++ b/packaging_automation/get_postgres_versions.py @@ -1,12 +1,12 @@ -import argparse -import json - -from .test_citus_package import get_postgres_versions_from_matrix_file - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--project_version", required=True) - - args = parser.parse_args() - postgres_versions = get_postgres_versions_from_matrix_file(args.project_version) - print(json.dumps(postgres_versions)) +import argparse +import json + +from .test_citus_package import get_postgres_versions_from_matrix_file + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_version", required=True) + + args = parser.parse_args() + postgres_versions = get_postgres_versions_from_matrix_file(args.project_version) + print(json.dumps(postgres_versions)) diff --git a/packaging_automation/github_statistics_collector.py b/packaging_automation/github_statistics_collector.py index b07da49e..f69d2988 100644 --- a/packaging_automation/github_statistics_collector.py +++ b/packaging_automation/github_statistics_collector.py @@ -1,204 +1,204 @@ -import argparse -from datetime import datetime -from enum import Enum -from typing import Dict, Any - -from github import Github -from sqlalchemy import ( - Column, - DATE, - INTEGER, - TIMESTAMP, - ForeignKey, - String, - UniqueConstraint, -) -from sqlalchemy.orm import relationship - -from .dbconfig import Base, DbParams, db_session - -ORGANIZATION_NAME = "citusdata" - - -class GithubRepos(Enum): - citus = "citus" - pg_auto_failover = "pg-auto-failover" - - -class GithubCloneStatsTransactionsMain(Base): - __tablename__ = "github_stats_clone_transactions_main" - id = Column(INTEGER, primary_key=True, autoincrement=True) - fetch_time = Column(TIMESTAMP, nullable=False) - repo_name = Column(String, nullable=False) - count = Column(INTEGER, nullable=False) - uniques = Column(INTEGER, nullable=False) - details = relationship( - "GithubCloneStatsTransactionsDetail", - backref="github_stats_clone_transactions_main", - lazy=True, - ) - - -class GithubCloneStatsTransactionsDetail(Base): - __tablename__ = "github_stats_clone_transactions_detail" - id = Column(INTEGER, primary_key=True, autoincrement=True) - clone_date = Column(DATE, nullable=False) - count = Column(INTEGER, nullable=False) - 
uniques = Column(INTEGER, nullable=False) - parent_id = Column( - INTEGER, ForeignKey("github_stats_clone_transactions_main.id"), nullable=False - ) - - -class GithubCloneStats(Base): - __tablename__ = "github_clone_stats" - id = Column(INTEGER, primary_key=True, autoincrement=True) - repo_name = Column(String, nullable=False) - fetch_time = Column(TIMESTAMP, nullable=False) - clone_date = Column(DATE, nullable=False) - count = Column(INTEGER, nullable=False) - uniques = Column(INTEGER, nullable=False) - __table_args__ = ( - UniqueConstraint("repo_name", "clone_date", name="repo_name_clone_date_uq"), - ) - - -class GitHubReleases(Base): - __tablename__ = "github_releases" - id = Column(INTEGER, primary_key=True, autoincrement=True) - repo_name = Column(String, nullable=False) - fetch_time = Column(TIMESTAMP, nullable=False) - tag_name = Column(String, nullable=False) - release_time = Column(TIMESTAMP, nullable=False) - - -def clone_record_exists(record_time: datetime.date, session) -> bool: - db_record = ( - session.query(GithubCloneStats).filter_by(clone_date=record_time).first() - ) - return db_record is not None - - -def release_record_exists(tag_name: str, session) -> bool: - db_record = session.query(GitHubReleases).filter_by(tag_name=tag_name).first() - return db_record is not None - - -def github_clone_stats( - github_token: str, organization_name: str, repo_name: str -) -> Dict[str, Any]: - g = Github(github_token) - repo = g.get_repo(f"{organization_name}/{repo_name}") - return repo.get_clones_traffic(per="day") - - -def github_releases(github_token: str, organization_name: str, repo_name: str): - g = Github(github_token) - repo = g.get_repo(f"{organization_name}/{repo_name}") - return repo.get_releases() - - -def fetch_and_store_github_stats( - organization_name: str, - repo_name: str, - db_parameters: DbParams, - github_token: str, - is_test: bool, -): - fetch_and_store_github_clones( - organization_name, repo_name, db_parameters, github_token, is_test - ) - fetch_and_store_github_releases( - organization_name, repo_name, db_parameters, github_token, is_test - ) - - -def fetch_and_store_github_releases( - organization_name: str, - repo_name: str, - db_parameters: DbParams, - github_token: str, - is_test: bool, -): - releases = github_releases(github_token, organization_name, repo_name) - session = db_session(db_parameters, is_test) - for release in releases: - if not release_record_exists(release.tag_name, session): - github_release = GitHubReleases( - repo_name=repo_name, - fetch_time=datetime.now(), - tag_name=release.tag_name, - release_time=release.created_at, - ) - session.add(github_release) - session.commit() - - -def fetch_and_store_github_clones( - organization_name: str, - repo_name: str, - db_parameters: DbParams, - github_token: str, - is_test: bool, -): - contents = github_clone_stats(github_token, organization_name, repo_name) - session = db_session(db_parameters, is_test) - fetch_time = datetime.now() - main_transaction = GithubCloneStatsTransactionsMain( - fetch_time=fetch_time, - count=contents["count"], - repo_name=repo_name, - uniques=contents["uniques"], - ) - for daily_record in contents["clones"]: - detail_transaction = GithubCloneStatsTransactionsDetail( - clone_date=daily_record.timestamp, - count=daily_record.count, - uniques=daily_record.uniques, - ) - main_transaction.details.append(detail_transaction) - # current date's record is skipped since statistics continue to change until end of the day - # stat record will not be added if it exists in 
'github_clone_stats' table - if daily_record.timestamp.date() == fetch_time.date() or clone_record_exists( - daily_record.timestamp.date(), session=session - ): - continue - stats_record = GithubCloneStats( - fetch_time=fetch_time, - clone_date=daily_record.timestamp, - count=daily_record.count, - uniques=daily_record.uniques, - repo_name=repo_name, - ) - - session.add(stats_record) - session.add(main_transaction) - session.commit() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--repo_name", choices=[r.value for r in GithubRepos]) - parser.add_argument("--db_user_name", required=True) - parser.add_argument("--db_password", required=True) - parser.add_argument("--db_host_and_port", required=True) - parser.add_argument("--db_name", required=True) - parser.add_argument("--github_token", required=True) - parser.add_argument("--is_test", action="store_true") - - arguments = parser.parse_args() - - db_params = DbParams( - user_name=arguments.db_user_name, - password=arguments.db_password, - host_and_port=arguments.db_host_and_port, - db_name=arguments.db_name, - ) - - fetch_and_store_github_stats( - organization_name=ORGANIZATION_NAME, - repo_name=arguments.repo_name, - github_token=arguments.github_token, - db_parameters=db_params, - is_test=arguments.is_test, - ) +import argparse +from datetime import datetime +from enum import Enum +from typing import Dict, Any + +from github import Github +from sqlalchemy import ( + Column, + DATE, + INTEGER, + TIMESTAMP, + ForeignKey, + String, + UniqueConstraint, +) +from sqlalchemy.orm import relationship + +from .dbconfig import Base, DbParams, db_session + +ORGANIZATION_NAME = "citusdata" + + +class GithubRepos(Enum): + citus = "citus" + pg_auto_failover = "pg-auto-failover" + + +class GithubCloneStatsTransactionsMain(Base): + __tablename__ = "github_stats_clone_transactions_main" + id = Column(INTEGER, primary_key=True, autoincrement=True) + fetch_time = Column(TIMESTAMP, nullable=False) + repo_name = Column(String, nullable=False) + count = Column(INTEGER, nullable=False) + uniques = Column(INTEGER, nullable=False) + details = relationship( + "GithubCloneStatsTransactionsDetail", + backref="github_stats_clone_transactions_main", + lazy=True, + ) + + +class GithubCloneStatsTransactionsDetail(Base): + __tablename__ = "github_stats_clone_transactions_detail" + id = Column(INTEGER, primary_key=True, autoincrement=True) + clone_date = Column(DATE, nullable=False) + count = Column(INTEGER, nullable=False) + uniques = Column(INTEGER, nullable=False) + parent_id = Column( + INTEGER, ForeignKey("github_stats_clone_transactions_main.id"), nullable=False + ) + + +class GithubCloneStats(Base): + __tablename__ = "github_clone_stats" + id = Column(INTEGER, primary_key=True, autoincrement=True) + repo_name = Column(String, nullable=False) + fetch_time = Column(TIMESTAMP, nullable=False) + clone_date = Column(DATE, nullable=False) + count = Column(INTEGER, nullable=False) + uniques = Column(INTEGER, nullable=False) + __table_args__ = ( + UniqueConstraint("repo_name", "clone_date", name="repo_name_clone_date_uq"), + ) + + +class GitHubReleases(Base): + __tablename__ = "github_releases" + id = Column(INTEGER, primary_key=True, autoincrement=True) + repo_name = Column(String, nullable=False) + fetch_time = Column(TIMESTAMP, nullable=False) + tag_name = Column(String, nullable=False) + release_time = Column(TIMESTAMP, nullable=False) + + +def clone_record_exists(record_time: datetime.date, session) -> bool: + db_record = ( + 
session.query(GithubCloneStats).filter_by(clone_date=record_time).first() + ) + return db_record is not None + + +def release_record_exists(tag_name: str, session) -> bool: + db_record = session.query(GitHubReleases).filter_by(tag_name=tag_name).first() + return db_record is not None + + +def github_clone_stats( + github_token: str, organization_name: str, repo_name: str +) -> Dict[str, Any]: + g = Github(github_token) + repo = g.get_repo(f"{organization_name}/{repo_name}") + return repo.get_clones_traffic(per="day") + + +def github_releases(github_token: str, organization_name: str, repo_name: str): + g = Github(github_token) + repo = g.get_repo(f"{organization_name}/{repo_name}") + return repo.get_releases() + + +def fetch_and_store_github_stats( + organization_name: str, + repo_name: str, + db_parameters: DbParams, + github_token: str, + is_test: bool, +): + fetch_and_store_github_clones( + organization_name, repo_name, db_parameters, github_token, is_test + ) + fetch_and_store_github_releases( + organization_name, repo_name, db_parameters, github_token, is_test + ) + + +def fetch_and_store_github_releases( + organization_name: str, + repo_name: str, + db_parameters: DbParams, + github_token: str, + is_test: bool, +): + releases = github_releases(github_token, organization_name, repo_name) + session = db_session(db_parameters, is_test) + for release in releases: + if not release_record_exists(release.tag_name, session): + github_release = GitHubReleases( + repo_name=repo_name, + fetch_time=datetime.now(), + tag_name=release.tag_name, + release_time=release.created_at, + ) + session.add(github_release) + session.commit() + + +def fetch_and_store_github_clones( + organization_name: str, + repo_name: str, + db_parameters: DbParams, + github_token: str, + is_test: bool, +): + contents = github_clone_stats(github_token, organization_name, repo_name) + session = db_session(db_parameters, is_test) + fetch_time = datetime.now() + main_transaction = GithubCloneStatsTransactionsMain( + fetch_time=fetch_time, + count=contents["count"], + repo_name=repo_name, + uniques=contents["uniques"], + ) + for daily_record in contents["clones"]: + detail_transaction = GithubCloneStatsTransactionsDetail( + clone_date=daily_record.timestamp, + count=daily_record.count, + uniques=daily_record.uniques, + ) + main_transaction.details.append(detail_transaction) + # current date's record is skipped since statistics continue to change until end of the day + # stat record will not be added if it exists in 'github_clone_stats' table + if daily_record.timestamp.date() == fetch_time.date() or clone_record_exists( + daily_record.timestamp.date(), session=session + ): + continue + stats_record = GithubCloneStats( + fetch_time=fetch_time, + clone_date=daily_record.timestamp, + count=daily_record.count, + uniques=daily_record.uniques, + repo_name=repo_name, + ) + + session.add(stats_record) + session.add(main_transaction) + session.commit() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--repo_name", choices=[r.value for r in GithubRepos]) + parser.add_argument("--db_user_name", required=True) + parser.add_argument("--db_password", required=True) + parser.add_argument("--db_host_and_port", required=True) + parser.add_argument("--db_name", required=True) + parser.add_argument("--github_token", required=True) + parser.add_argument("--is_test", action="store_true") + + arguments = parser.parse_args() + + db_params = DbParams( + user_name=arguments.db_user_name, + 
password=arguments.db_password, + host_and_port=arguments.db_host_and_port, + db_name=arguments.db_name, + ) + + fetch_and_store_github_stats( + organization_name=ORGANIZATION_NAME, + repo_name=arguments.repo_name, + github_token=arguments.github_token, + db_parameters=db_params, + is_test=arguments.is_test, + ) diff --git a/packaging_automation/homebrew_statistics_collector.py b/packaging_automation/homebrew_statistics_collector.py index 847f7898..3a2e0cdb 100644 --- a/packaging_automation/homebrew_statistics_collector.py +++ b/packaging_automation/homebrew_statistics_collector.py @@ -1,66 +1,66 @@ -import argparse -import json -from datetime import datetime, date - -from sqlalchemy import Column, INTEGER, DATE, TIMESTAMP - -from .common_tool_methods import stat_get_request -from .dbconfig import Base, db_session, DbParams, RequestType - -HOMEBREW_STATS_ADDRESS = "https://formulae.brew.sh/api/formula/citus.json" - - -class HomebrewStats(Base): - __tablename__ = "homebrew_stats" - id = Column(INTEGER, primary_key=True, autoincrement=True) - fetch_time = Column(TIMESTAMP, nullable=False) - stat_date = Column(DATE, nullable=False, unique=True) - stat_30d = Column( - INTEGER, - nullable=False, - default=0, - ) - stat_90d = Column(INTEGER, nullable=False, default=0) - stat_365d = Column(INTEGER, nullable=False, default=0) - - -def fetch_and_save_homebrew_stats(db_params: DbParams, is_test: bool) -> None: - session = db_session(db_params=db_params, is_test=is_test) - - result = stat_get_request( - HOMEBREW_STATS_ADDRESS, RequestType.homebrew_download, session - ) - stat_details = json.loads(result.content) - record = session.query(HomebrewStats).filter_by(stat_date=date.today()).first() - if record is None: - hb_stat = HomebrewStats( - fetch_time=datetime.now(), - stat_date=date.today(), - stat_30d=stat_details["analytics"]["install"]["30d"]["citus"], - stat_90d=stat_details["analytics"]["install"]["90d"]["citus"], - stat_365d=stat_details["analytics"]["install"]["365d"]["citus"], - ) - session.add(hb_stat) - session.commit() - else: - print(f"Homebrew stat for the day {date.today()} already exists") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--db_user_name", required=True) - parser.add_argument("--db_password", required=True) - parser.add_argument("--db_host_and_port", required=True) - parser.add_argument("--db_name", required=True) - parser.add_argument("--is_test", action="store_true") - - arguments = parser.parse_args() - - db_parameters = DbParams( - user_name=arguments.db_user_name, - password=arguments.db_password, - host_and_port=arguments.db_host_and_port, - db_name=arguments.db_name, - ) - - fetch_and_save_homebrew_stats(db_parameters, arguments.is_test) +import argparse +import json +from datetime import datetime, date + +from sqlalchemy import Column, INTEGER, DATE, TIMESTAMP + +from .common_tool_methods import stat_get_request +from .dbconfig import Base, db_session, DbParams, RequestType + +HOMEBREW_STATS_ADDRESS = "https://formulae.brew.sh/api/formula/citus.json" + + +class HomebrewStats(Base): + __tablename__ = "homebrew_stats" + id = Column(INTEGER, primary_key=True, autoincrement=True) + fetch_time = Column(TIMESTAMP, nullable=False) + stat_date = Column(DATE, nullable=False, unique=True) + stat_30d = Column( + INTEGER, + nullable=False, + default=0, + ) + stat_90d = Column(INTEGER, nullable=False, default=0) + stat_365d = Column(INTEGER, nullable=False, default=0) + + +def fetch_and_save_homebrew_stats(db_params: DbParams, 
is_test: bool) -> None: + session = db_session(db_params=db_params, is_test=is_test) + + result = stat_get_request( + HOMEBREW_STATS_ADDRESS, RequestType.homebrew_download, session + ) + stat_details = json.loads(result.content) + record = session.query(HomebrewStats).filter_by(stat_date=date.today()).first() + if record is None: + hb_stat = HomebrewStats( + fetch_time=datetime.now(), + stat_date=date.today(), + stat_30d=stat_details["analytics"]["install"]["30d"]["citus"], + stat_90d=stat_details["analytics"]["install"]["90d"]["citus"], + stat_365d=stat_details["analytics"]["install"]["365d"]["citus"], + ) + session.add(hb_stat) + session.commit() + else: + print(f"Homebrew stat for the day {date.today()} already exists") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--db_user_name", required=True) + parser.add_argument("--db_password", required=True) + parser.add_argument("--db_host_and_port", required=True) + parser.add_argument("--db_name", required=True) + parser.add_argument("--is_test", action="store_true") + + arguments = parser.parse_args() + + db_parameters = DbParams( + user_name=arguments.db_user_name, + password=arguments.db_password, + host_and_port=arguments.db_host_and_port, + db_name=arguments.db_name, + ) + + fetch_and_save_homebrew_stats(db_parameters, arguments.is_test) diff --git a/packaging_automation/package_cloud_statistics_collector.py b/packaging_automation/package_cloud_statistics_collector.py index dc179797..74dd64b4 100644 --- a/packaging_automation/package_cloud_statistics_collector.py +++ b/packaging_automation/package_cloud_statistics_collector.py @@ -1,429 +1,429 @@ -import argparse -import json -import time -from datetime import datetime, date -from enum import Enum -from http import HTTPStatus - -import requests -import sqlalchemy -from attr import dataclass -from sqlalchemy import Column, INTEGER, DATE, TIMESTAMP, String, UniqueConstraint - -from .common_tool_methods import remove_suffix, stat_get_request -from .dbconfig import Base, db_session, DbParams, RequestType - -PC_PACKAGE_COUNT_SUFFIX = " packages" -PC_DOWNLOAD_DATE_FORMAT = "%Y%m%dZ" -PC_DOWNLOAD_DETAIL_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z" -DEFAULT_PAGE_RECORD_COUNT = 100 - - -class PackageCloudRepo(Enum): - community = "community" - enterprise = "enterprise" - azure = "azure" - community_nightlies = "community-nightlies" - enterprise_nightlies = "enterprise-nightlies" - test = "test" - - -class PackageCloudOrganization(Enum): - citusdata = "citusdata" - citus_bot = "citus-bot" - - -class PackageCloudDownloadStats(Base): - __tablename__ = "package_cloud_download_stats" - id = Column(INTEGER, primary_key=True, autoincrement=True) - fetch_date = Column(TIMESTAMP, nullable=False) - repo = Column(sqlalchemy.Enum(PackageCloudRepo), nullable=False) - package_name = Column(String, nullable=False) - package_full_name = Column(String, nullable=False) - package_version = Column(String, nullable=False) - package_release = Column(String) - distro_version = Column(String, nullable=False) - epoch = Column(String, nullable=False) - package_type = Column(String, nullable=False) - download_date = Column(DATE, nullable=False) - download_count = Column(INTEGER, nullable=False) - detail_url = Column(String, nullable=False) - UniqueConstraint( - "package_full_name", - "download_date", - "distro_version", - name="ux_package_cloud_download_stats", - ) - - -class PackageCloudDownloadDetails(Base): - __tablename__ = "package_cloud_download_details" - id = Column(INTEGER, 
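# fetch_and_save_homebrew_stats above reads exactly three counters out of the formulae.brew.sh
# response; the JSON is assumed to look roughly like the sketch below (the counts are made up):
#
#   {"analytics": {"install": {"30d": {"citus": 120}, "90d": {"citus": 350}, "365d": {"citus": 1400}}}}
#
# i.e. stat_details["analytics"]["install"]["30d"]["citus"] is the 30-day install count.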
primary_key=True, autoincrement=True) - fetch_date = Column(TIMESTAMP, nullable=False) - repo = Column(sqlalchemy.Enum(PackageCloudRepo), nullable=False) - package_name = Column(String, nullable=False) - package_full_name = Column(String, nullable=False) - package_version = Column(String, nullable=False) - package_release = Column(String) - distro_version = Column(String, nullable=False) - epoch = Column(String, nullable=False) - package_type = Column(String, nullable=False) - downloaded_at = Column(TIMESTAMP, nullable=False) - download_date = Column(DATE, nullable=False) - ip_address = Column(String) - user_agent = Column(String) - source = Column(String) - read_token = Column(String) - - -def package_count( - organization: PackageCloudOrganization, - repo_name: PackageCloudRepo, - package_cloud_api_token: str, -) -> int: - result = requests.get( - f"https://{package_cloud_api_token}:@packagecloud.io/api/v1/repos.json?include_collaborations=true", - timeout=60, - ) - - repo_list = json.loads(result.content) - for repo in repo_list: - if repo["fqname"] == f"{organization.name}/{repo_name.value}": - return int( - remove_suffix(repo["package_count_human"], PC_PACKAGE_COUNT_SUFFIX) - ) - raise ValueError( - f"Repo name with the name {repo_name.value} could not be found on package cloud" - ) - - -@dataclass -class PackageCloudParams: - # admin api token is citusdata token to get package details. - admin_api_token: str - # citus bot api token to make api calls other than package details - standard_api_token: str - organization: PackageCloudOrganization - repo_name: PackageCloudRepo - - -@dataclass -class ParallelExecutionParams: - parallel_count: int - parallel_exec_index: int - page_record_count: int - - -def fetch_and_save_package_cloud_stats( - db_params: DbParams, - package_cloud_params: PackageCloudParams, - parallel_execution_params: ParallelExecutionParams, - is_test: bool = False, - save_records_with_download_count_zero: bool = False, -): - """It is called directly from pipeline. Packages are queried page by page from packagecloud. 
Packages are queried - with the given index and queried packages are saved into database using - fetch_and_save_package_stats_for_package_list method""" - repo_package_count = package_count( - organization=package_cloud_params.organization, - repo_name=package_cloud_params.repo_name, - package_cloud_api_token=package_cloud_params.standard_api_token, - ) - session = db_session(db_params=db_params, is_test=is_test) - page_index = parallel_execution_params.parallel_exec_index + 1 - start = time.time() - while is_page_in_range( - page_index, repo_package_count, parallel_execution_params.page_record_count - ): - result = stat_get_request( - package_list_with_pagination_request_address( - package_cloud_params, - page_index, - parallel_execution_params.page_record_count, - ), - RequestType.package_cloud_list_package, - session, - ) - package_info_list = json.loads(result.content) - - if len(package_info_list) > 0: - page_index = page_index + parallel_execution_params.parallel_count - else: - break - for package_info in package_info_list: - fetch_and_save_package_download_details( - package_info, - package_cloud_params.admin_api_token, - session, - package_cloud_params.repo_name, - ) - fetch_and_save_package_stats( - package_info, - package_cloud_params.standard_api_token, - session, - save_records_with_download_count_zero, - package_cloud_params.repo_name, - ) - - session.commit() - - end = time.time() - - print("Elapsed Time in seconds: " + str(end - start)) - - -def fetch_and_save_package_stats( - package_info, - package_cloud_api_token: str, - session, - save_records_with_download_count_zero: bool, - repo_name: PackageCloudRepo, -): - """Gets and saves the package statistics of the given packages""" - request_result = stat_get_request( - package_statistics_request_address( - package_cloud_api_token, package_info["downloads_series_url"] - ), - RequestType.package_cloud_download_series_query, - session, - ) - if request_result.status_code != HTTPStatus.OK: - raise ValueError( - f"Error while getting package stat for package {package_info['filename']}" - ) - download_stats = json.loads(request_result.content) - for stat_date in download_stats["value"]: - download_date = datetime.strptime(stat_date, PC_DOWNLOAD_DATE_FORMAT).date() - download_count = int(download_stats["value"][stat_date]) - if ( - download_date != date.today() - and not is_ignored_package(package_info["name"]) - and not stat_records_exists( - download_date, - package_info["filename"], - package_info["distro_version"], - session, - ) - and is_download_count_eligible_for_save( - download_count, save_records_with_download_count_zero - ) - ): - pc_stats = PackageCloudDownloadStats( - fetch_date=datetime.now(), - repo=repo_name, - package_full_name=package_info["filename"], - package_name=package_info["name"], - distro_version=package_info["distro_version"], - package_version=package_info["version"], - package_release=package_info["release"], - package_type=package_info["type"], - epoch=package_info["epoch"], - download_date=download_date, - download_count=download_count, - detail_url=package_info["downloads_detail_url"], - ) - - session.add(pc_stats) - - -def fetch_and_save_package_download_details( - package_info, - package_cloud_admin_api_token: str, - session, - repo_name: PackageCloudRepo, -): - print( - f"Download Detail Query for {package_info['filename']}: {package_info['downloads_detail_url']}" - ) - page_number = 1 - record_count = DEFAULT_PAGE_RECORD_COUNT - while record_count == DEFAULT_PAGE_RECORD_COUNT: - request_result = 
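# As the docstring above notes, this collector is run as several parallel pipeline jobs, each
# walking its own slice of the package pages: the page index starts at parallel_exec_index + 1
# and then advances by parallel_count. With parallel_count = 3, for example,
#
#   the worker with parallel_exec_index 0 requests pages 1, 4, 7, ...
#   the worker with parallel_exec_index 1 requests pages 2, 5, 8, ...
#   the worker with parallel_exec_index 2 requests pages 3, 6, 9, ...
#
# until is_page_in_range reports that the index is past the repository's package count.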
stat_get_request( - package_statistics_detail_request_address( - package_cloud_admin_api_token, - package_info["downloads_detail_url"], - DEFAULT_PAGE_RECORD_COUNT, - page_number, - ), - RequestType.package_cloud_detail_query, - session, - ) - page_number = page_number + 1 - if request_result.status_code != HTTPStatus.OK: - raise ValueError( - f"Error while calling detail query for package {package_info['filename']}. " - f"Error Code: {request_result.status_code}" - ) - download_details = json.loads(request_result.content) - record_count = len(download_details) - - for download_detail in download_details: - downloaded_at = datetime.strptime( - download_detail["downloaded_at"], PC_DOWNLOAD_DETAIL_DATE_FORMAT - ) - download_date = downloaded_at.date() - if ( - download_date != date.today() - and not is_ignored_package(package_info["name"]) - and not stat_records_exists( - download_date, - package_info["filename"], - package_info["distro_version"], - session, - ) - ): - download_detail_record = PackageCloudDownloadDetails( - fetch_date=datetime.now(), - repo=repo_name, - package_full_name=package_info["filename"], - package_name=package_info["name"], - distro_version=package_info["distro_version"], - package_version=package_info["version"], - package_release=package_info["release"], - package_type=package_info["type"], - epoch=package_info["epoch"], - download_date=download_date, - downloaded_at=downloaded_at, - ip_address=download_detail["ip_address"], - user_agent=download_detail["user_agent"], - source=download_detail["source"], - read_token=download_detail["read_token"], - ) - session.add(download_detail_record) - - -def package_statistics_request_address( - package_cloud_api_token: str, series_query_uri: str -): - return f"https://{package_cloud_api_token}:@packagecloud.io/{series_query_uri}" - - -def package_statistics_detail_request_address( - package_cloud_api_token: str, detail_query_uri: str, per_page: int, page_number: int -): - return f"https://{package_cloud_api_token}:@packagecloud.io/{detail_query_uri}?per_page={per_page}&page={page_number}" - - -def package_list_with_pagination_request_address( - package_cloud_params: PackageCloudParams, page_index: int, page_record_count: int -) -> str: - return ( - f"https://{package_cloud_params.standard_api_token}:@packagecloud.io/api/v1/repos/" - f"{package_cloud_params.organization.name}/{package_cloud_params.repo_name.value}" - f"/packages.json?per_page={page_record_count}&page={page_index}" - ) - - -def is_download_count_eligible_for_save( - download_count: int, save_records_with_download_count_zero: bool -) -> bool: - return download_count > 0 or ( - download_count == 0 and save_records_with_download_count_zero - ) - - -def is_page_in_range(page_index: int, total_package_count: int, page_record_count: int): - return (page_index * page_record_count < total_package_count) or ( - page_index * page_record_count - >= total_package_count - > (page_index - 1) * page_record_count - ) - - -def stat_records_exists( - download_date: date, package_full_name: str, distro_version: str, session -) -> bool: - db_record = ( - session.query(PackageCloudDownloadStats) - .filter_by( - download_date=download_date, - package_full_name=package_full_name, - distro_version=distro_version, - ) - .first() - ) - return db_record is not None - - -def detail_records_exists( - downloaded_at: datetime, - ip_address: str, - package_full_name: str, - distro_version: str, - session, -) -> bool: - db_record = ( - session.query(PackageCloudDownloadDetails) - .filter_by( 
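# is_page_in_range above also accepts the final, partially filled page. With 250 packages in
# the repository and 100 records per page:
#
#   is_page_in_range(1, 250, 100)  # True  -> 100 < 250
#   is_page_in_range(2, 250, 100)  # True  -> 200 < 250
#   is_page_in_range(3, 250, 100)  # True  -> 300 >= 250 > 200 (the last partial page)
#   is_page_in_range(4, 250, 100)  # False -> 400 >= 250 but 250 <= 300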
- downloaded_at=downloaded_at, - ip_address=ip_address, - package_full_name=package_full_name, - distro_version=distro_version, - ) - .first() - ) - return db_record is not None - - -def is_ignored_package(package_name: str) -> bool: - ignored_suffixes = ("debuginfo", "dbgsym") - ignored_prefixes = ("citus-ha-", "pg-auto-failover-cli") - return package_name.endswith(ignored_suffixes) or package_name.startswith( - ignored_prefixes - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--organization", choices=[r.value for r in PackageCloudOrganization] - ) - parser.add_argument("--repo_name", choices=[r.value for r in PackageCloudRepo]) - parser.add_argument("--db_user_name", required=True) - parser.add_argument("--db_password", required=True) - parser.add_argument("--db_host_and_port", required=True) - parser.add_argument("--db_name", required=True) - parser.add_argument("--package_cloud_api_token", required=True) - parser.add_argument("--package_cloud_admin_api_token", required=True) - parser.add_argument( - "--parallel_count", type=int, choices=range(1, 30), required=True, default=1 - ) - parser.add_argument( - "--parallel_exec_index", - type=int, - choices=range(0, 30), - required=True, - default=0, - ) - parser.add_argument( - "--page_record_count", type=int, choices=range(5, 101), required=True, default=0 - ) - parser.add_argument("--is_test", action="store_true") - - arguments = parser.parse_args() - - db_parameters = DbParams( - user_name=arguments.db_user_name, - password=arguments.db_password, - host_and_port=arguments.db_host_and_port, - db_name=arguments.db_name, - ) - - package_cloud_parameters = PackageCloudParams( - admin_api_token=arguments.package_cloud_admin_api_token, - standard_api_token=arguments.package_cloud_api_token, - organization=PackageCloudOrganization(arguments.organization), - repo_name=PackageCloudRepo(arguments.repo_name), - ) - parallel_execution_params = ParallelExecutionParams( - parallel_count=arguments.parallel_count, - parallel_exec_index=arguments.parallel_exec_index, - page_record_count=DEFAULT_PAGE_RECORD_COUNT, - ) - - fetch_and_save_package_cloud_stats( - db_parameters, - package_cloud_params=package_cloud_parameters, - parallel_execution_params=parallel_execution_params, - is_test=arguments.is_test, - ) +import argparse +import json +import time +from datetime import datetime, date +from enum import Enum +from http import HTTPStatus + +import requests +import sqlalchemy +from attr import dataclass +from sqlalchemy import Column, INTEGER, DATE, TIMESTAMP, String, UniqueConstraint + +from .common_tool_methods import remove_suffix, stat_get_request +from .dbconfig import Base, db_session, DbParams, RequestType + +PC_PACKAGE_COUNT_SUFFIX = " packages" +PC_DOWNLOAD_DATE_FORMAT = "%Y%m%dZ" +PC_DOWNLOAD_DETAIL_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z" +DEFAULT_PAGE_RECORD_COUNT = 100 + + +class PackageCloudRepo(Enum): + community = "community" + enterprise = "enterprise" + azure = "azure" + community_nightlies = "community-nightlies" + enterprise_nightlies = "enterprise-nightlies" + test = "test" + + +class PackageCloudOrganization(Enum): + citusdata = "citusdata" + citus_bot = "citus-bot" + + +class PackageCloudDownloadStats(Base): + __tablename__ = "package_cloud_download_stats" + id = Column(INTEGER, primary_key=True, autoincrement=True) + fetch_date = Column(TIMESTAMP, nullable=False) + repo = Column(sqlalchemy.Enum(PackageCloudRepo), nullable=False) + package_name = Column(String, nullable=False) + 
package_full_name = Column(String, nullable=False) + package_version = Column(String, nullable=False) + package_release = Column(String) + distro_version = Column(String, nullable=False) + epoch = Column(String, nullable=False) + package_type = Column(String, nullable=False) + download_date = Column(DATE, nullable=False) + download_count = Column(INTEGER, nullable=False) + detail_url = Column(String, nullable=False) + UniqueConstraint( + "package_full_name", + "download_date", + "distro_version", + name="ux_package_cloud_download_stats", + ) + + +class PackageCloudDownloadDetails(Base): + __tablename__ = "package_cloud_download_details" + id = Column(INTEGER, primary_key=True, autoincrement=True) + fetch_date = Column(TIMESTAMP, nullable=False) + repo = Column(sqlalchemy.Enum(PackageCloudRepo), nullable=False) + package_name = Column(String, nullable=False) + package_full_name = Column(String, nullable=False) + package_version = Column(String, nullable=False) + package_release = Column(String) + distro_version = Column(String, nullable=False) + epoch = Column(String, nullable=False) + package_type = Column(String, nullable=False) + downloaded_at = Column(TIMESTAMP, nullable=False) + download_date = Column(DATE, nullable=False) + ip_address = Column(String) + user_agent = Column(String) + source = Column(String) + read_token = Column(String) + + +def package_count( + organization: PackageCloudOrganization, + repo_name: PackageCloudRepo, + package_cloud_api_token: str, +) -> int: + result = requests.get( + f"https://{package_cloud_api_token}:@packagecloud.io/api/v1/repos.json?include_collaborations=true", + timeout=60, + ) + + repo_list = json.loads(result.content) + for repo in repo_list: + if repo["fqname"] == f"{organization.name}/{repo_name.value}": + return int( + remove_suffix(repo["package_count_human"], PC_PACKAGE_COUNT_SUFFIX) + ) + raise ValueError( + f"Repo name with the name {repo_name.value} could not be found on package cloud" + ) + + +@dataclass +class PackageCloudParams: + # admin api token is citusdata token to get package details. + admin_api_token: str + # citus bot api token to make api calls other than package details + standard_api_token: str + organization: PackageCloudOrganization + repo_name: PackageCloudRepo + + +@dataclass +class ParallelExecutionParams: + parallel_count: int + parallel_exec_index: int + page_record_count: int + + +def fetch_and_save_package_cloud_stats( + db_params: DbParams, + package_cloud_params: PackageCloudParams, + parallel_execution_params: ParallelExecutionParams, + is_test: bool = False, + save_records_with_download_count_zero: bool = False, +): + """It is called directly from pipeline. Packages are queried page by page from packagecloud. 
Packages are queried + with the given index and queried packages are saved into database using + fetch_and_save_package_stats_for_package_list method""" + repo_package_count = package_count( + organization=package_cloud_params.organization, + repo_name=package_cloud_params.repo_name, + package_cloud_api_token=package_cloud_params.standard_api_token, + ) + session = db_session(db_params=db_params, is_test=is_test) + page_index = parallel_execution_params.parallel_exec_index + 1 + start = time.time() + while is_page_in_range( + page_index, repo_package_count, parallel_execution_params.page_record_count + ): + result = stat_get_request( + package_list_with_pagination_request_address( + package_cloud_params, + page_index, + parallel_execution_params.page_record_count, + ), + RequestType.package_cloud_list_package, + session, + ) + package_info_list = json.loads(result.content) + + if len(package_info_list) > 0: + page_index = page_index + parallel_execution_params.parallel_count + else: + break + for package_info in package_info_list: + fetch_and_save_package_download_details( + package_info, + package_cloud_params.admin_api_token, + session, + package_cloud_params.repo_name, + ) + fetch_and_save_package_stats( + package_info, + package_cloud_params.standard_api_token, + session, + save_records_with_download_count_zero, + package_cloud_params.repo_name, + ) + + session.commit() + + end = time.time() + + print("Elapsed Time in seconds: " + str(end - start)) + + +def fetch_and_save_package_stats( + package_info, + package_cloud_api_token: str, + session, + save_records_with_download_count_zero: bool, + repo_name: PackageCloudRepo, +): + """Gets and saves the package statistics of the given packages""" + request_result = stat_get_request( + package_statistics_request_address( + package_cloud_api_token, package_info["downloads_series_url"] + ), + RequestType.package_cloud_download_series_query, + session, + ) + if request_result.status_code != HTTPStatus.OK: + raise ValueError( + f"Error while getting package stat for package {package_info['filename']}" + ) + download_stats = json.loads(request_result.content) + for stat_date in download_stats["value"]: + download_date = datetime.strptime(stat_date, PC_DOWNLOAD_DATE_FORMAT).date() + download_count = int(download_stats["value"][stat_date]) + if ( + download_date != date.today() + and not is_ignored_package(package_info["name"]) + and not stat_records_exists( + download_date, + package_info["filename"], + package_info["distro_version"], + session, + ) + and is_download_count_eligible_for_save( + download_count, save_records_with_download_count_zero + ) + ): + pc_stats = PackageCloudDownloadStats( + fetch_date=datetime.now(), + repo=repo_name, + package_full_name=package_info["filename"], + package_name=package_info["name"], + distro_version=package_info["distro_version"], + package_version=package_info["version"], + package_release=package_info["release"], + package_type=package_info["type"], + epoch=package_info["epoch"], + download_date=download_date, + download_count=download_count, + detail_url=package_info["downloads_detail_url"], + ) + + session.add(pc_stats) + + +def fetch_and_save_package_download_details( + package_info, + package_cloud_admin_api_token: str, + session, + repo_name: PackageCloudRepo, +): + print( + f"Download Detail Query for {package_info['filename']}: {package_info['downloads_detail_url']}" + ) + page_number = 1 + record_count = DEFAULT_PAGE_RECORD_COUNT + while record_count == DEFAULT_PAGE_RECORD_COUNT: + request_result = 
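# Illustrative sketch, not part of the patch: how the paging loop above splits work across
# parallel pipeline jobs. Worker i (parallel_exec_index=i) starts at page i+1 and advances
# by parallel_count; the helper below mirrors is_page_in_range (defined further down) in a
# simplified form.
def pages_for_worker(parallel_exec_index, parallel_count, total_package_count, page_record_count):
    pages = []
    page_index = parallel_exec_index + 1
    while (page_index - 1) * page_record_count < total_package_count:
        pages.append(page_index)
        page_index += parallel_count
    return pages

# e.g. with 950 packages, 100 records per page and 3 workers:
#   pages_for_worker(0, 3, 950, 100) == [1, 4, 7, 10]
#   pages_for_worker(1, 3, 950, 100) == [2, 5, 8]
#   pages_for_worker(2, 3, 950, 100) == [3, 6, 9]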
stat_get_request( + package_statistics_detail_request_address( + package_cloud_admin_api_token, + package_info["downloads_detail_url"], + DEFAULT_PAGE_RECORD_COUNT, + page_number, + ), + RequestType.package_cloud_detail_query, + session, + ) + page_number = page_number + 1 + if request_result.status_code != HTTPStatus.OK: + raise ValueError( + f"Error while calling detail query for package {package_info['filename']}. " + f"Error Code: {request_result.status_code}" + ) + download_details = json.loads(request_result.content) + record_count = len(download_details) + + for download_detail in download_details: + downloaded_at = datetime.strptime( + download_detail["downloaded_at"], PC_DOWNLOAD_DETAIL_DATE_FORMAT + ) + download_date = downloaded_at.date() + if ( + download_date != date.today() + and not is_ignored_package(package_info["name"]) + and not stat_records_exists( + download_date, + package_info["filename"], + package_info["distro_version"], + session, + ) + ): + download_detail_record = PackageCloudDownloadDetails( + fetch_date=datetime.now(), + repo=repo_name, + package_full_name=package_info["filename"], + package_name=package_info["name"], + distro_version=package_info["distro_version"], + package_version=package_info["version"], + package_release=package_info["release"], + package_type=package_info["type"], + epoch=package_info["epoch"], + download_date=download_date, + downloaded_at=downloaded_at, + ip_address=download_detail["ip_address"], + user_agent=download_detail["user_agent"], + source=download_detail["source"], + read_token=download_detail["read_token"], + ) + session.add(download_detail_record) + + +def package_statistics_request_address( + package_cloud_api_token: str, series_query_uri: str +): + return f"https://{package_cloud_api_token}:@packagecloud.io/{series_query_uri}" + + +def package_statistics_detail_request_address( + package_cloud_api_token: str, detail_query_uri: str, per_page: int, page_number: int +): + return f"https://{package_cloud_api_token}:@packagecloud.io/{detail_query_uri}?per_page={per_page}&page={page_number}" + + +def package_list_with_pagination_request_address( + package_cloud_params: PackageCloudParams, page_index: int, page_record_count: int +) -> str: + return ( + f"https://{package_cloud_params.standard_api_token}:@packagecloud.io/api/v1/repos/" + f"{package_cloud_params.organization.name}/{package_cloud_params.repo_name.value}" + f"/packages.json?per_page={page_record_count}&page={page_index}" + ) + + +def is_download_count_eligible_for_save( + download_count: int, save_records_with_download_count_zero: bool +) -> bool: + return download_count > 0 or ( + download_count == 0 and save_records_with_download_count_zero + ) + + +def is_page_in_range(page_index: int, total_package_count: int, page_record_count: int): + return (page_index * page_record_count < total_package_count) or ( + page_index * page_record_count + >= total_package_count + > (page_index - 1) * page_record_count + ) + + +def stat_records_exists( + download_date: date, package_full_name: str, distro_version: str, session +) -> bool: + db_record = ( + session.query(PackageCloudDownloadStats) + .filter_by( + download_date=download_date, + package_full_name=package_full_name, + distro_version=distro_version, + ) + .first() + ) + return db_record is not None + + +def detail_records_exists( + downloaded_at: datetime, + ip_address: str, + package_full_name: str, + distro_version: str, + session, +) -> bool: + db_record = ( + session.query(PackageCloudDownloadDetails) + .filter_by( 
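# Illustrative sketch, not part of the patch: the detail loop above requests pages of
# DEFAULT_PAGE_RECORD_COUNT rows and stops once a page comes back short, i.e. the last
# page has been reached. fetch_detail_page below is a hypothetical stand-in for the
# stat_get_request call.
def iterate_detail_pages(fetch_detail_page, per_page=DEFAULT_PAGE_RECORD_COUNT):
    page_number = 1
    record_count = per_page
    while record_count == per_page:
        page = fetch_detail_page(page_number, per_page)  # a list of download detail dicts
        record_count = len(page)
        page_number += 1
        yield from page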
+ downloaded_at=downloaded_at, + ip_address=ip_address, + package_full_name=package_full_name, + distro_version=distro_version, + ) + .first() + ) + return db_record is not None + + +def is_ignored_package(package_name: str) -> bool: + ignored_suffixes = ("debuginfo", "dbgsym") + ignored_prefixes = ("citus-ha-", "pg-auto-failover-cli") + return package_name.endswith(ignored_suffixes) or package_name.startswith( + ignored_prefixes + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--organization", choices=[r.value for r in PackageCloudOrganization] + ) + parser.add_argument("--repo_name", choices=[r.value for r in PackageCloudRepo]) + parser.add_argument("--db_user_name", required=True) + parser.add_argument("--db_password", required=True) + parser.add_argument("--db_host_and_port", required=True) + parser.add_argument("--db_name", required=True) + parser.add_argument("--package_cloud_api_token", required=True) + parser.add_argument("--package_cloud_admin_api_token", required=True) + parser.add_argument( + "--parallel_count", type=int, choices=range(1, 30), required=True, default=1 + ) + parser.add_argument( + "--parallel_exec_index", + type=int, + choices=range(0, 30), + required=True, + default=0, + ) + parser.add_argument( + "--page_record_count", type=int, choices=range(5, 101), required=True, default=0 + ) + parser.add_argument("--is_test", action="store_true") + + arguments = parser.parse_args() + + db_parameters = DbParams( + user_name=arguments.db_user_name, + password=arguments.db_password, + host_and_port=arguments.db_host_and_port, + db_name=arguments.db_name, + ) + + package_cloud_parameters = PackageCloudParams( + admin_api_token=arguments.package_cloud_admin_api_token, + standard_api_token=arguments.package_cloud_api_token, + organization=PackageCloudOrganization(arguments.organization), + repo_name=PackageCloudRepo(arguments.repo_name), + ) + parallel_execution_params = ParallelExecutionParams( + parallel_count=arguments.parallel_count, + parallel_exec_index=arguments.parallel_exec_index, + page_record_count=DEFAULT_PAGE_RECORD_COUNT, + ) + + fetch_and_save_package_cloud_stats( + db_parameters, + package_cloud_params=package_cloud_parameters, + parallel_execution_params=parallel_execution_params, + is_test=arguments.is_test, + ) diff --git a/packaging_automation/packaging_warning_handler.py b/packaging_automation/packaging_warning_handler.py index c9b337a7..2a43aa48 100644 --- a/packaging_automation/packaging_warning_handler.py +++ b/packaging_automation/packaging_warning_handler.py @@ -1,170 +1,170 @@ -import os -import re -import sys -from enum import Enum -from typing import List, Tuple - -import yaml - -from .common_tool_methods import ( - PackageType, - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, -) - - -class PackagingWarningIgnoreType(Enum): - base = 1 - debian = 2 - rpm = 3 - - -def validate_output(output: str, ignore_file_path: str, package_type: PackageType): - base_ignore_list, package_type_specific_ignore_list = parse_ignore_lists( - ignore_file_path, package_type - ) - - print(f"Package type specific ignore list:{package_type_specific_ignore_list}") - print(f"Base ignore list:{base_ignore_list}") - - output_lines = output.splitlines() - warning_lines, package_type_specific_warning_lines = filter_warning_lines( - output_lines, package_type - ) - print("Checking build output for warnings") - print("Package Type:" + package_type.name) - print(f"Package type specific 
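# Illustrative sketch, not part of the patch: driving the same stats flow from Python
# instead of the argparse entry point above; all concrete values are placeholders.
db_parameters = DbParams(
    user_name="stats",
    password="***",
    host_and_port="localhost:5432",
    db_name="packagestats",
)
package_cloud_parameters = PackageCloudParams(
    admin_api_token="***",
    standard_api_token="***",
    organization=PackageCloudOrganization.citusdata,
    repo_name=PackageCloudRepo.community,
)
parallel_execution_params = ParallelExecutionParams(
    parallel_count=3,
    parallel_exec_index=0,
    page_record_count=DEFAULT_PAGE_RECORD_COUNT,
)
fetch_and_save_package_cloud_stats(
    db_parameters,
    package_cloud_params=package_cloud_parameters,
    parallel_execution_params=parallel_execution_params,
    is_test=True,
)
# Note: the __main__ block above parses --page_record_count but builds
# ParallelExecutionParams with DEFAULT_PAGE_RECORD_COUNT, so the parsed value is unused.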
warnings:{package_type_specific_warning_lines}") - - base_warnings_to_be_raised = get_warnings_to_be_raised( - base_ignore_list, warning_lines - ) - package_type_specific_warnings_to_be_raised = get_warnings_to_be_raised( - package_type_specific_ignore_list, package_type_specific_warning_lines - ) - - print( - f"Package type specific warnings to be raised:{package_type_specific_warnings_to_be_raised}" - ) - print(f"Base warnings to be raised:{base_warnings_to_be_raised}") - - if ( - len(base_warnings_to_be_raised) > 0 - or len(package_type_specific_warnings_to_be_raised) > 0 - ): - error_message = get_error_message( - base_warnings_to_be_raised, - package_type_specific_warnings_to_be_raised, - package_type, - ) - print(f"Build output check failed. Error Message: \n{error_message}") - sys.exit(1) - else: - print("Build output check completed succesfully. No warnings") - - -def filter_warning_lines( - output_lines: List[str], package_type: PackageType -) -> Tuple[List[str], List[str]]: - rpm_warning_summary = ( - r"\d+ packages and \d+ specfiles checked; \d+ errors, \d+ warnings." - ) - rpm_lintian_starter = 'Executing "/usr/bin/rpmlint -f /rpmlintrc' - debian_lintian_starter = "Now running lintian" - lintian_warning_error_pattern = r".*: [W|E]: .*" - - base_warning_lines = [] - package_specific_warning_lines = [] - is_deb_warning_line = False - is_rpm_warning_line = False - for output_line in output_lines: - if package_type == PackageType.deb: - if debian_lintian_starter in output_line: - is_deb_warning_line = True - elif "warning" in output_line.lower() or is_deb_warning_line: - if is_deb_warning_line: - match = re.match(lintian_warning_error_pattern, output_line) - if match: - package_specific_warning_lines.append(output_line) - else: - is_deb_warning_line = False - else: - base_warning_lines.append(output_line) - else: - if rpm_lintian_starter in output_line: - is_rpm_warning_line = True - elif "warning" in output_line.lower() or is_rpm_warning_line: - if is_rpm_warning_line and re.match(rpm_warning_summary, output_line): - is_rpm_warning_line = False - continue - if re.match(lintian_warning_error_pattern, output_line): - package_specific_warning_lines.append(output_line) - else: - base_warning_lines.append(output_line) - else: - continue - - return base_warning_lines, package_specific_warning_lines - - -def parse_ignore_lists(ignore_file_path: str, package_type: PackageType): - base_ignore_list = [] - packaging_warning_type = ( - PackagingWarningIgnoreType.debian - if package_type == PackageType.deb - else PackagingWarningIgnoreType.rpm - ) - package_type_specific_ignore_list = [] - with open( - ignore_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - yaml_content = yaml.load(reader, yaml.BaseLoader) - if PackagingWarningIgnoreType.base.name in yaml_content: - base_ignore_list = yaml_content[PackagingWarningIgnoreType.base.name] - if packaging_warning_type.name in yaml_content: - package_type_specific_ignore_list = yaml_content[packaging_warning_type.name] - - return base_ignore_list, package_type_specific_ignore_list - - -def get_warnings_to_be_raised( - ignore_list: List[str], warning_lines: List[str] -) -> List[str]: - warnings_to_be_raised = [] - for warning_line in warning_lines: - has_ignore_match = False - for ignore_line in ignore_list: - if re.match(ignore_line, warning_line): - has_ignore_match = True - break - if not has_ignore_match: - warnings_to_be_raised.append(warning_line) - return 
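# Illustrative sketch, not part of the patch: sample lines the rpmlint/lintian patterns
# above accept; the tool output lines are made up, only the regexes come from the code.
import re

lintian_warning_error_pattern = r".*: [W|E]: .*"
assert re.match(lintian_warning_error_pattern, "citus: W: binary-without-manpage usr/bin/foo")
assert re.match(lintian_warning_error_pattern, "citus.x86_64: E: no-documentation")
# The character class [W|E] also matches a literal '|'; [WE] would state the intent
# ("W or E") more precisely.
rpm_warning_summary = r"\d+ packages and \d+ specfiles checked; \d+ errors, \d+ warnings."
assert re.match(rpm_warning_summary, "1 packages and 0 specfiles checked; 0 errors, 2 warnings.")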
warnings_to_be_raised - - -def get_error_message( - base_warnings_to_be_raised: List[str], - package_specific_warnings_to_be_raised: List[str], - package_type: PackageType, -) -> str: - error_message = "" - package_type_specific_header = ( - "Debian Warning lines:\n" - if package_type == PackageType.deb - else "Rpm Warning lines:\n" - ) - error_message = ( - f"{error_message}Warning lines:\n{os.linesep.join(base_warnings_to_be_raised)}\n" - if len(base_warnings_to_be_raised) > 0 - else error_message - ) - error_message = ( - f"{error_message}{package_type_specific_header}" - f"{os.linesep.join(package_specific_warnings_to_be_raised)}\n" - if len(package_specific_warnings_to_be_raised) > 0 - else error_message - ) - - return error_message +import os +import re +import sys +from enum import Enum +from typing import List, Tuple + +import yaml + +from .common_tool_methods import ( + PackageType, + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, +) + + +class PackagingWarningIgnoreType(Enum): + base = 1 + debian = 2 + rpm = 3 + + +def validate_output(output: str, ignore_file_path: str, package_type: PackageType): + base_ignore_list, package_type_specific_ignore_list = parse_ignore_lists( + ignore_file_path, package_type + ) + + print(f"Package type specific ignore list:{package_type_specific_ignore_list}") + print(f"Base ignore list:{base_ignore_list}") + + output_lines = output.splitlines() + warning_lines, package_type_specific_warning_lines = filter_warning_lines( + output_lines, package_type + ) + print("Checking build output for warnings") + print("Package Type:" + package_type.name) + print(f"Package type specific warnings:{package_type_specific_warning_lines}") + + base_warnings_to_be_raised = get_warnings_to_be_raised( + base_ignore_list, warning_lines + ) + package_type_specific_warnings_to_be_raised = get_warnings_to_be_raised( + package_type_specific_ignore_list, package_type_specific_warning_lines + ) + + print( + f"Package type specific warnings to be raised:{package_type_specific_warnings_to_be_raised}" + ) + print(f"Base warnings to be raised:{base_warnings_to_be_raised}") + + if ( + len(base_warnings_to_be_raised) > 0 + or len(package_type_specific_warnings_to_be_raised) > 0 + ): + error_message = get_error_message( + base_warnings_to_be_raised, + package_type_specific_warnings_to_be_raised, + package_type, + ) + print(f"Build output check failed. Error Message: \n{error_message}") + sys.exit(1) + else: + print("Build output check completed succesfully. No warnings") + + +def filter_warning_lines( + output_lines: List[str], package_type: PackageType +) -> Tuple[List[str], List[str]]: + rpm_warning_summary = ( + r"\d+ packages and \d+ specfiles checked; \d+ errors, \d+ warnings." 
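# Illustrative sketch, not part of the patch: the shape of ignore file parse_ignore_lists
# in this module expects. The top-level keys mirror the PackagingWarningIgnoreType member
# names (base, debian, rpm); the entries are made-up examples. Because
# get_warnings_to_be_raised uses re.match, each entry is effectively a regex anchored at
# the start of the warning line.
import yaml

sample_ignore_yaml = """
base:
  - ".*warning: implicit declaration.*"
debian:
  - ".*: W: binary-without-manpage.*"
rpm:
  - ".*: W: no-documentation.*"
"""
content = yaml.load(sample_ignore_yaml, yaml.BaseLoader)
assert content["base"] == [".*warning: implicit declaration.*"]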
+ ) + rpm_lintian_starter = 'Executing "/usr/bin/rpmlint -f /rpmlintrc' + debian_lintian_starter = "Now running lintian" + lintian_warning_error_pattern = r".*: [W|E]: .*" + + base_warning_lines = [] + package_specific_warning_lines = [] + is_deb_warning_line = False + is_rpm_warning_line = False + for output_line in output_lines: + if package_type == PackageType.deb: + if debian_lintian_starter in output_line: + is_deb_warning_line = True + elif "warning" in output_line.lower() or is_deb_warning_line: + if is_deb_warning_line: + match = re.match(lintian_warning_error_pattern, output_line) + if match: + package_specific_warning_lines.append(output_line) + else: + is_deb_warning_line = False + else: + base_warning_lines.append(output_line) + else: + if rpm_lintian_starter in output_line: + is_rpm_warning_line = True + elif "warning" in output_line.lower() or is_rpm_warning_line: + if is_rpm_warning_line and re.match(rpm_warning_summary, output_line): + is_rpm_warning_line = False + continue + if re.match(lintian_warning_error_pattern, output_line): + package_specific_warning_lines.append(output_line) + else: + base_warning_lines.append(output_line) + else: + continue + + return base_warning_lines, package_specific_warning_lines + + +def parse_ignore_lists(ignore_file_path: str, package_type: PackageType): + base_ignore_list = [] + packaging_warning_type = ( + PackagingWarningIgnoreType.debian + if package_type == PackageType.deb + else PackagingWarningIgnoreType.rpm + ) + package_type_specific_ignore_list = [] + with open( + ignore_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + yaml_content = yaml.load(reader, yaml.BaseLoader) + if PackagingWarningIgnoreType.base.name in yaml_content: + base_ignore_list = yaml_content[PackagingWarningIgnoreType.base.name] + if packaging_warning_type.name in yaml_content: + package_type_specific_ignore_list = yaml_content[packaging_warning_type.name] + + return base_ignore_list, package_type_specific_ignore_list + + +def get_warnings_to_be_raised( + ignore_list: List[str], warning_lines: List[str] +) -> List[str]: + warnings_to_be_raised = [] + for warning_line in warning_lines: + has_ignore_match = False + for ignore_line in ignore_list: + if re.match(ignore_line, warning_line): + has_ignore_match = True + break + if not has_ignore_match: + warnings_to_be_raised.append(warning_line) + return warnings_to_be_raised + + +def get_error_message( + base_warnings_to_be_raised: List[str], + package_specific_warnings_to_be_raised: List[str], + package_type: PackageType, +) -> str: + error_message = "" + package_type_specific_header = ( + "Debian Warning lines:\n" + if package_type == PackageType.deb + else "Rpm Warning lines:\n" + ) + error_message = ( + f"{error_message}Warning lines:\n{os.linesep.join(base_warnings_to_be_raised)}\n" + if len(base_warnings_to_be_raised) > 0 + else error_message + ) + error_message = ( + f"{error_message}{package_type_specific_header}" + f"{os.linesep.join(package_specific_warnings_to_be_raised)}\n" + if len(package_specific_warnings_to_be_raised) > 0 + else error_message + ) + + return error_message diff --git a/packaging_automation/prepare_release.py b/packaging_automation/prepare_release.py index 888528a2..6b75bbfc 100644 --- a/packaging_automation/prepare_release.py +++ b/packaging_automation/prepare_release.py @@ -1,848 +1,848 @@ -import argparse -import os -import uuid -from dataclasses import dataclass -from datetime import datetime - -import pathlib2 
-from github import Github, Repository -from parameters_validation import non_blank, non_empty -from typing import Dict - -from .common_tool_methods import ( - get_version_details, - is_major_release, - get_prs_for_patch_release, - filter_prs_by_label, - cherry_pick_prs, - run, - replace_line_in_file, - get_current_branch, - find_nth_matching_line_and_line_number, - get_patch_version_regex, - remote_branch_exists, - local_branch_exists, - prepend_line_in_file, - get_template_environment, - get_upcoming_minor_version, - remove_cloned_code, - initialize_env, - create_pr_with_repo, - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, -) -from .common_validations import CITUS_MINOR_VERSION_PATTERN, CITUS_PATCH_VERSION_PATTERN - -MULTI_EXTENSION_SQL = "src/test/regress/sql/multi_extension.sql" -CITUS_CONTROL = "src/backend/distributed/citus.control" -MULTI_EXTENSION_OUT = "src/test/regress/expected/multi_extension.out" -CONFIG_PY = "src/test/regress/upgrade/config.py" -DISTRIBUTED_SQL_DIR_PATH = "src/backend/distributed/sql" -DOWNGRADES_DIR_PATH = f"{DISTRIBUTED_SQL_DIR_PATH}/downgrades" -CONFIGURE_IN = "configure.in" -CONFIGURE = "configure" -CITUS_CONTROL_SEARCH_PATTERN = r"^default_version*" - -MULTI_EXT_DEVEL_SEARCH_PATTERN = rf"^\s*{CITUS_MINOR_VERSION_PATTERN}devel$" -MULTI_EXT_PATCH_SEARCH_PATTERN = rf"^\s*{CITUS_PATCH_VERSION_PATTERN}$" - -MULTI_EXT_DETAIL_PREFIX = r"DETAIL: Loaded library requires " -MULTI_EXT_DETAIL1_SUFFIX = r", but 8.0-1 was specified." -MULTI_EXT_DETAIL2_SUFFIX = r", but the installed extension version is 8.1-1." -MULTI_EXT_DETAIL1_PATTERN = ( - rf"^{MULTI_EXT_DETAIL_PREFIX}\d+\.\d+{MULTI_EXT_DETAIL1_SUFFIX}$" -) - -MULTI_EXT_DETAIL2_PATTERN = ( - rf"^{MULTI_EXT_DETAIL_PREFIX}\d+\.\d+{MULTI_EXT_DETAIL2_SUFFIX}$" -) - -CONFIG_PY_MASTER_VERSION_SEARCH_PATTERN = r"^MASTER_VERSION = '\d+\.\d+'" - -CONFIGURE_IN_SEARCH_PATTERN = "AC_INIT*" -REPO_OWNER = "citusdata" - -BASE_PATH = pathlib2.Path(__file__).parent.absolute() -TEMPLATES_PATH = f"{BASE_PATH}/templates" - -MULTI_EXT_OUT_TEMPLATE_FILE = "multi_extension_out_prepare_release.tmpl" -MULTI_EXT_SQL_TEMPLATE_FILE = "multi_extension_sql_prepare_release.tmpl" - -repo_details = { - "citus": {"configure-in-str": "Citus", "branch": "master"}, - "citus-enterprise": { - "configure-in-str": "Citus Enterprise", - "branch": "enterprise-master", - }, -} - - -@dataclass -class UpdateReleaseReturnValue: - release_branch_name: str - upcoming_version_branch: str - upgrade_path_sql_file: str - downgrade_path_sql_file: str - - -@dataclass -class MajorReleaseParams: - configure_in_path: str - devel_version: str - is_test: bool - main_branch: str - multi_extension_out_path: str - project_name: str - project_version: str - release_branch_name: str - - -@dataclass -class UpcomingVersionBranchParams: - citus_control_file_path: str - config_py_path: str - configure_in_path: str - upcoming_devel_version: str - distributed_dir_path: str - downgrades_dir_path: str - is_test: bool - main_branch: str - multi_extension_out_path: str - multi_extension_sql_path: str - project_name: str - project_version: str - repository: Repository - upcoming_minor_version: str - upcoming_version_branch: str - - -@dataclass -class PatchReleaseParams: - cherry_pick_enabled: bool - configure_in_path: str - earliest_pr_date_value: datetime - is_test: bool - main_branch: str - citus_control_file_path: str - multi_extension_out_path: str - project_name: str - project_version: str - release_branch_name: str - schema_version: str - repository: 
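# Illustrative sketch, not part of the patch: a line the DETAIL pattern above matches; the
# sample is assembled from the constants themselves with a made-up version number.
import re

sample = f"{MULTI_EXT_DETAIL_PREFIX}12.1{MULTI_EXT_DETAIL1_SUFFIX}"
assert re.match(MULTI_EXT_DETAIL1_PATTERN, sample)
# Side note: the trailing "*" in CITUS_CONTROL_SEARCH_PATTERN ("^default_version*") and in
# CONFIGURE_IN_SEARCH_PATTERN ("AC_INIT*") applies, as a regex, only to the last character.
# Assuming replace_line_in_file does re.match-style prefix matching, both still hit the
# intended lines, but the intent reads more like a glob than a regex.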
Repository - - -@dataclass -class ProjectParams: - project_name: str - project_version: str - main_branch: str - schema_version: str - - -@dataclass -class PathParams: - multi_extension_sql_path: str - citus_control_file_path: str - multi_extension_out_path: str - configure_in_path: str - config_py_path: str - distributed_dir_path: str - downgrades_dir_path: str - - -@dataclass -class BranchParams: - release_branch_name: str - upcoming_version_branch: str - - -@dataclass -class VersionParams: - project_version_details: Dict[str, str] - upcoming_minor_version: str - upcoming_devel_version: str - - -BASE_GIT_PATH = pathlib2.Path(__file__).parents[1] - - -@dataclass -class MigrationFiles: - upgrade_file: str - downgrade_file: str - - -# disabled since this is related to parameter_validations library methods -# pylint: disable=no-value-for-parameter -def update_release( - github_token: non_blank(non_empty(str)), - project_params: ProjectParams, - earliest_pr_date: datetime, - exec_path: non_blank(non_empty(str)), - is_test: bool = False, - cherry_pick_enabled: bool = False, -) -> UpdateReleaseReturnValue: - path_params = PathParams( - multi_extension_out_path=f"{exec_path}/{MULTI_EXTENSION_OUT}", - multi_extension_sql_path=f"{exec_path}/{MULTI_EXTENSION_SQL}", - citus_control_file_path=f"{exec_path}/{CITUS_CONTROL}", - configure_in_path=f"{exec_path}/{CONFIGURE_IN}", - config_py_path=f"{exec_path}/{CONFIG_PY}", - distributed_dir_path=f"{exec_path}/{DISTRIBUTED_SQL_DIR_PATH}", - downgrades_dir_path=f"{exec_path}/{DOWNGRADES_DIR_PATH}", - ) - - version_params = VersionParams( - project_version_details=get_version_details(project_params.project_version), - upcoming_minor_version=get_upcoming_minor_version( - project_params.project_version - ), - upcoming_devel_version=f"{get_upcoming_minor_version(project_params.project_version)}devel", - ) - - branch_params = BranchParams( - release_branch_name=get_release_branch_name( - is_test, version_params.project_version_details - ), - upcoming_version_branch=f"master-update-version-{uuid.uuid4()}", - ) - - repository = get_github_repository(github_token, project_params) - - upcoming_version_branch = "" - - migration_files = MigrationFiles("", "") - # major release - if is_major_release(project_params.project_version): - print( - f"### {project_params.project_version} is a major release. Executing Major release flow... 
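# Illustrative sketch, not part of the patch: the names update_release derives for a major
# release, assuming get_upcoming_minor_version("12.0.0") returns "12.1" (that helper lives
# in common_tool_methods and is not shown here).
import uuid

upcoming_minor_version = "12.1"                                    # assumed helper output
upcoming_devel_version = f"{upcoming_minor_version}devel"          # "12.1devel"
upcoming_version_branch = f"master-update-version-{uuid.uuid4()}"  # master-update-version-<uuid4>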
###" - ) - major_release_params = MajorReleaseParams( - configure_in_path=path_params.configure_in_path, - devel_version=version_params.upcoming_devel_version, - is_test=is_test, - main_branch=project_params.main_branch, - multi_extension_out_path=path_params.multi_extension_out_path, - project_name=project_params.project_name, - project_version=project_params.project_version, - release_branch_name=branch_params.release_branch_name, - ) - prepare_release_branch_for_major_release(major_release_params) - upcoming_version_branch_params = UpcomingVersionBranchParams( - project_version=project_params.project_version, - project_name=project_params.project_name, - upcoming_version_branch=branch_params.upcoming_version_branch, - upcoming_devel_version=version_params.upcoming_devel_version, - is_test=is_test, - main_branch=project_params.main_branch, - citus_control_file_path=path_params.citus_control_file_path, - config_py_path=path_params.config_py_path, - configure_in_path=path_params.configure_in_path, - distributed_dir_path=path_params.distributed_dir_path, - downgrades_dir_path=path_params.downgrades_dir_path, - repository=repository, - upcoming_minor_version=version_params.upcoming_minor_version, - multi_extension_out_path=path_params.multi_extension_out_path, - multi_extension_sql_path=path_params.multi_extension_sql_path, - ) - upcoming_version_branch = upcoming_version_branch_params.upcoming_version_branch - - migration_files = prepare_upcoming_version_branch( - upcoming_version_branch_params - ) - print( - f"### Done {project_params.project_version} Major release flow executed successfully. ###" - ) - # patch release - else: - patch_release_params = PatchReleaseParams( - cherry_pick_enabled=cherry_pick_enabled, - configure_in_path=path_params.configure_in_path, - earliest_pr_date_value=earliest_pr_date, - is_test=is_test, - main_branch=project_params.main_branch, - multi_extension_out_path=path_params.multi_extension_out_path, - project_name=project_params.project_name, - project_version=project_params.project_version, - schema_version=project_params.schema_version, - citus_control_file_path=path_params.citus_control_file_path, - release_branch_name=branch_params.release_branch_name, - repository=repository, - ) - prepare_release_branch_for_patch_release(patch_release_params) - return UpdateReleaseReturnValue( - release_branch_name=branch_params.release_branch_name, - upcoming_version_branch=upcoming_version_branch, - upgrade_path_sql_file=f"{DISTRIBUTED_SQL_DIR_PATH}/{migration_files.upgrade_file}", - downgrade_path_sql_file=f"{DOWNGRADES_DIR_PATH}/{migration_files.downgrade_file}", - ) - - -def get_github_repository(github_token, project_params): - g = Github(github_token) - repository = g.get_repo(f"{REPO_OWNER}/{project_params.project_name}") - return repository - - -def get_release_branch_name(is_test, project_version_details): - release_branch_name = ( - f'release-{project_version_details["major"]}.{project_version_details["minor"]}' - ) - release_branch_name = ( - f"{release_branch_name}-test" if is_test else release_branch_name - ) - return release_branch_name - - -def prepare_release_branch_for_patch_release(patchReleaseParams: PatchReleaseParams): - print( - f"### {patchReleaseParams.project_version} is a patch release. Executing Patch release flow... ###" - ) - # checkout release branch (release-X.Y) In test case release branch for test may not be exist. 
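# Illustrative sketch, not part of the patch: what get_release_branch_name above returns
# for a made-up version-details dict.
details = {"major": "12", "minor": "1"}
assert get_release_branch_name(False, details) == "release-12.1"
assert get_release_branch_name(True, details) == "release-12.1-test"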
- # In this case create one - if patchReleaseParams.is_test: - non_test_release_branch = patchReleaseParams.release_branch_name.rstrip("-test") - release_branch_exist = remote_branch_exists( - non_test_release_branch, os.getcwd() - ) - test_release_branch_exist = local_branch_exists( - patchReleaseParams.release_branch_name, os.getcwd() - ) - - if release_branch_exist: - run(f"git checkout {non_test_release_branch}") - run(f"git checkout -b {patchReleaseParams.release_branch_name}") - elif test_release_branch_exist: - run(f"git checkout {patchReleaseParams.release_branch_name}") - else: - run(f"git checkout -b {patchReleaseParams.release_branch_name}") - else: - checkout_branch( - patchReleaseParams.release_branch_name, patchReleaseParams.is_test - ) - # change version info in configure.in file - update_version_in_configure_in( - patchReleaseParams.project_name, - patchReleaseParams.configure_in_path, - patchReleaseParams.project_version, - ) - # execute "auto-conf " - execute_autoconf_f() - # change version info in multi_extension.out - update_version_in_multi_extension_out_for_patch( - patchReleaseParams.multi_extension_out_path, patchReleaseParams.project_version - ) - # if schema version is not empty update citus.control schema version - if patchReleaseParams.schema_version: - update_schema_version_in_citus_control( - citus_control_file_path=patchReleaseParams.citus_control_file_path, - schema_version=patchReleaseParams.schema_version, - ) - if patchReleaseParams.cherry_pick_enabled: - # cherry-pick the pr's with backport labels - cherrypick_prs_with_backport_labels( - patchReleaseParams.earliest_pr_date_value, - patchReleaseParams.main_branch, - patchReleaseParams.release_branch_name, - patchReleaseParams.repository, - ) - # commit all changes - commit_changes_for_version_bump( - patchReleaseParams.project_name, patchReleaseParams.project_version - ) - # create and push release-$minor_version-push-$curTime branch - release_pr_branch = f"{patchReleaseParams.release_branch_name}_{uuid.uuid4()}" - create_and_checkout_branch(release_pr_branch) - if not patchReleaseParams.is_test: - push_branch(release_pr_branch) - - print("### Done Patch release flow executed successfully. ###") - - -def prepare_upcoming_version_branch(upcoming_params: UpcomingVersionBranchParams): - print( - f"### {upcoming_params.upcoming_version_branch} flow is being executed... 
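# Side note, offered as a sketch rather than as part of the patch: str.rstrip("-test")
# above strips any trailing characters from the set {'-', 't', 'e', 's'} rather than the
# literal suffix. It works here because release branch names end in digits, but
# str.removesuffix (Python 3.9+) states the intent directly.
assert "release-12.1-test".rstrip("-test") == "release-12.1"
assert "release-12.1-test".removesuffix("-test") == "release-12.1"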
###" - ) - # checkout master - checkout_branch(upcoming_params.main_branch, upcoming_params.is_test) - # create master-update-version-$curtime branch - create_and_checkout_branch(upcoming_params.upcoming_version_branch) - # update version info with upcoming version on configure.in - update_version_in_configure_in( - upcoming_params.project_name, - upcoming_params.configure_in_path, - upcoming_params.upcoming_devel_version, - ) - # update version info with upcoming version on config.py - update_version_with_upcoming_version_in_config_py( - upcoming_params.config_py_path, upcoming_params.upcoming_minor_version - ) - # execute autoconf -f - execute_autoconf_f() - # update version info with upcoming version on multi_extension.out - update_version_in_multi_extension_out( - upcoming_params.multi_extension_out_path, upcoming_params.upcoming_devel_version - ) - # update detail lines with minor version - update_detail_strings_in_multi_extension_out( - upcoming_params.multi_extension_out_path, upcoming_params.upcoming_minor_version - ) - # get current schema version from citus.control - current_schema_version = get_current_schema_from_citus_control( - upcoming_params.citus_control_file_path - ) - # add downgrade script in multi_extension.sql file - add_downgrade_script_in_multi_extension_file( - current_schema_version, - upcoming_params.multi_extension_sql_path, - upcoming_params.upcoming_minor_version, - MULTI_EXT_SQL_TEMPLATE_FILE, - ) - # add downgrade script in multi_extension.out file - add_downgrade_script_in_multi_extension_file( - current_schema_version, - upcoming_params.multi_extension_out_path, - upcoming_params.upcoming_minor_version, - MULTI_EXT_OUT_TEMPLATE_FILE, - ) - # create a new sql file for upgrade path: - upgrade_file = create_new_sql_for_upgrade_path( - current_schema_version, - upcoming_params.distributed_dir_path, - upcoming_params.upcoming_minor_version, - ) - # create a new sql file for downgrade path: - downgrade_file = create_new_sql_for_downgrade_path( - current_schema_version, - upcoming_params.downgrades_dir_path, - upcoming_params.upcoming_minor_version, - ) - - # change version in citus.control file - default_upcoming_schema_version = f"{upcoming_params.upcoming_minor_version}-1" - update_schema_version_in_citus_control( - upcoming_params.citus_control_file_path, default_upcoming_schema_version - ) - # commit and push changes on master-update-version-$curtime branch - commit_changes_for_version_bump( - upcoming_params.project_name, upcoming_params.upcoming_devel_version - ) - if not upcoming_params.is_test: - push_branch(upcoming_params.upcoming_version_branch) - - # create pull request - create_pull_request_for_upcoming_version_branch( - upcoming_params.repository, - upcoming_params.main_branch, - upcoming_params.upcoming_version_branch, - upcoming_params.upcoming_devel_version, - ) - print(f"### Done {upcoming_params.upcoming_version_branch} flow executed. ###") - return MigrationFiles(upgrade_file=upgrade_file, downgrade_file=downgrade_file) - - -def prepare_release_branch_for_major_release(majorReleaseParams: MajorReleaseParams): - print( - f"### {majorReleaseParams.release_branch_name} release branch flow is being executed... 
###" - ) - # checkout master - checkout_branch(majorReleaseParams.main_branch, majorReleaseParams.is_test) - # create release branch in release-X.Y format - create_and_checkout_branch(majorReleaseParams.release_branch_name) - # change version info in configure.in file - update_version_in_configure_in( - majorReleaseParams.project_name, - majorReleaseParams.configure_in_path, - majorReleaseParams.project_version, - ) - # execute "autoconf -f" - execute_autoconf_f() - # change version info in multi_extension.out - update_version_in_multi_extension_out( - majorReleaseParams.multi_extension_out_path, majorReleaseParams.project_version - ) - # commit all changes - commit_changes_for_version_bump( - majorReleaseParams.project_name, majorReleaseParams.project_version - ) - # push release branch (No PR creation!!!) - if not majorReleaseParams.is_test: - push_branch(majorReleaseParams.release_branch_name) - print( - f"### Done {majorReleaseParams.release_branch_name} release branch flow executed .###" - ) - - -def cherrypick_prs_with_backport_labels( - earliest_pr_date, main_branch, release_branch_name, repository -): - print( - f"### Getting all PR with backport label after {datetime.strftime(earliest_pr_date, '%Y.%m.%d %H:%M')}... ### " - ) - prs_with_earliest_date = get_prs_for_patch_release( - repository, earliest_pr_date, main_branch - ) - # get commits for selected prs with backport label - prs_with_backport = filter_prs_by_label(prs_with_earliest_date, "backport") - print( - f"### Done {len(prs_with_backport)} PRs with backport label found. PR list is as below. ###" - ) - for pr in prs_with_backport: - print(f"\tNo:{pr.number} Title:{pr.title}") - # cherrypick all commits with backport label - print(f"### Cherry-picking PRs to {release_branch_name}... ###") - cherry_pick_prs(prs_with_backport) - print( - f"### Done Cherry pick completed for all PRs on branch {release_branch_name}. ###" - ) - - -def create_pull_request_for_upcoming_version_branch( - repository, main_branch, upcoming_version_branch, upcoming_version -): - print(f"### Creating pull request for {upcoming_version_branch}... ###") - pr_result = create_pr_with_repo( - repo=repository, - pr_branch=upcoming_version_branch, - pr_title=f"Bump Citus to {upcoming_version}", - base_branch=main_branch, - ) - print( - f"### Done Pull request created. PR no:{pr_result.number} PR URL: {pr_result.url}. ### " - ) - - -def push_branch(upcoming_version_branch): - print(f"Pushing changes for {upcoming_version_branch} into remote origin... ###") - run(f"git push --set-upstream origin {upcoming_version_branch}") - print(f"### Done Changes pushed for {upcoming_version_branch}. ###") - - -def commit_changes_for_version_bump(project_name, project_version): - current_branch = get_current_branch(os.getcwd()) - print(f"### Committing changes for branch {current_branch}... ###") - run("git add .") - run(f' git commit -m "Bump {project_name} version to {project_version} "') - print(f"### Done Changes committed for {current_branch}. ###") - - -def update_schema_version_in_citus_control(citus_control_file_path, schema_version): - print( - f"### Updating {citus_control_file_path} file with the version {schema_version}... ###" - ) - if not replace_line_in_file( - citus_control_file_path, - CITUS_CONTROL_SEARCH_PATTERN, - f"default_version = '{schema_version}'", - ): - raise ValueError(f"{citus_control_file_path} does not have match for version") - print( - f"### Done {citus_control_file_path} file is updated with the schema version {schema_version}. 
###" - ) - - -def add_downgrade_script_in_multi_extension_file( - current_schema_version, - multi_extension_out_path, - upcoming_minor_version, - template_file: str, -): - print( - f"### Adding downgrade scripts from version {current_schema_version} to " - f"{upcoming_minor_version} on {multi_extension_out_path}... ### " - ) - env = get_template_environment(TEMPLATES_PATH) - template = env.get_template( - template_file - ) # multi_extension_out_prepare_release.tmpl multi_extension_sql_prepare_release.tmpl - string_to_prepend = f"{template.render(current_schema_version=current_schema_version, upcoming_minor_version=f'{upcoming_minor_version}-1')}\n" - - if not prepend_line_in_file( - multi_extension_out_path, - "DROP TABLE prev_objects, extension_diff;", - string_to_prepend, - ): - raise ValueError( - f"Downgrade scripts could not be added in {multi_extension_out_path} since " - f"'DROP TABLE prev_objects, extension_diff;' script could not be found " - ) - print( - f"### Done Test downgrade scripts successfully added in {multi_extension_out_path}. ###" - ) - - -def get_current_schema_from_citus_control(citus_control_file_path: str) -> str: - print(f"### Reading current schema version from {citus_control_file_path}... ###") - current_schema_version = "" - with open( - citus_control_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as cc_reader: - cc_file_content = cc_reader.read() - _, cc_line = find_nth_matching_line_and_line_number( - cc_file_content, CITUS_CONTROL_SEARCH_PATTERN, 1 - ) - schema_not_found = False - if len(cc_line) > 0: - line_parts = cc_line.split("=") - if len(line_parts) == 2: - current_schema_version = line_parts[1] - else: - schema_not_found = True - else: - schema_not_found = True - - if schema_not_found: - raise ValueError("Version info could not be found in citus.control file") - - current_schema_version = current_schema_version.strip(" '") - print(f"### Done Schema version is {current_schema_version}. ###") - return current_schema_version - - -def update_version_with_upcoming_version_in_config_py( - config_py_path, upcoming_minor_version -): - print( - f"### Updating {config_py_path} file with the upcoming version {upcoming_minor_version}... ###" - ) - if not replace_line_in_file( - config_py_path, - CONFIG_PY_MASTER_VERSION_SEARCH_PATTERN, - f"MASTER_VERSION = '{upcoming_minor_version}'", - ): - raise ValueError(f"{config_py_path} does not have match for version") - print( - f"### Done {config_py_path} file updated with the upcoming version {upcoming_minor_version}. ###" - ) - - -def update_version_in_multi_extension_out(multi_extension_out_path, project_version): - print( - f"### Updating {multi_extension_out_path} file with the project version {project_version}... ###" - ) - - if not replace_line_in_file( - multi_extension_out_path, MULTI_EXT_DEVEL_SEARCH_PATTERN, f" {project_version}" - ): - raise ValueError( - f"{multi_extension_out_path} does not contain the version with pattern {MULTI_EXT_DEVEL_SEARCH_PATTERN}" - ) - print( - f"### Done {multi_extension_out_path} file is updated with project version {project_version}. ###" - ) - - -def update_detail_strings_in_multi_extension_out( - multi_extension_out_path, minor_version -): - print( - f"### Updating {multi_extension_out_path} detail lines file with the project version {minor_version}... 
###" - ) - - if not replace_line_in_file( - multi_extension_out_path, - MULTI_EXT_DETAIL1_PATTERN, - f"{MULTI_EXT_DETAIL_PREFIX}{minor_version}{MULTI_EXT_DETAIL1_SUFFIX}", - ): - raise ValueError( - f"{multi_extension_out_path} does not contain the version with pattern {MULTI_EXT_DETAIL1_PATTERN}" - ) - - if not replace_line_in_file( - multi_extension_out_path, - MULTI_EXT_DETAIL2_PATTERN, - f"{MULTI_EXT_DETAIL_PREFIX}{minor_version}{MULTI_EXT_DETAIL2_SUFFIX}", - ): - raise ValueError( - f"{multi_extension_out_path} does not contain the version with pattern {MULTI_EXT_DETAIL2_PATTERN}" - ) - - print( - f"### Done {multi_extension_out_path} detail lines updated with project version {minor_version}. ###" - ) - - -def update_version_in_multi_extension_out_for_patch( - multi_extension_out_path, project_version -): - print( - f"### Updating {multi_extension_out_path} file with the project version {project_version}... ###" - ) - - if not replace_line_in_file( - multi_extension_out_path, - get_patch_version_regex(project_version), - f" {project_version}", - ): - raise ValueError( - f"{multi_extension_out_path} does not contain the version with pattern {get_patch_version_regex(project_version)}" - ) - print( - f"### Done {multi_extension_out_path} file is updated with project version {project_version}. ###" - ) - - -def execute_autoconf_f(): - print("### Executing autoconf -f command... ###") - run("autoconf -f") - print("### Done autoconf -f executed. ###") - - -def update_version_in_configure_in(project_name, configure_in_path, project_version): - print(f"### Updating version on file {configure_in_path}... ###") - if not replace_line_in_file( - configure_in_path, - CONFIGURE_IN_SEARCH_PATTERN, - f"AC_INIT([{repo_details[project_name]['configure-in-str']}], [{project_version}])", - ): - raise ValueError(f"{configure_in_path} does not have match for version") - print( - f"### Done {configure_in_path} file is updated with project version {project_version}. ###" - ) - - -def create_and_checkout_branch(release_branch_name): - print( - f"### Creating release branch with name {release_branch_name} from {get_current_branch(os.getcwd())}... ###" - ) - run(f"git checkout -b {release_branch_name}") - print(f"### Done {release_branch_name} created. ###") - - -def checkout_branch(branch_name, is_test): - print(f"### Checking out {branch_name}... ###") - run(f"git checkout {branch_name}") - if not is_test: - run("git pull") - - print(f"### Done {branch_name} checked out and pulled. ###") - - -def upgrade_sql_file_name(current_schema_version, upcoming_minor_version): - return f"citus--{current_schema_version}--{upcoming_minor_version}-1.sql" - - -def create_new_sql_for_upgrade_path( - current_schema_version, distributed_dir_path, upcoming_minor_version -): - newly_created_sql_file = upgrade_sql_file_name( - current_schema_version, upcoming_minor_version - ) - print(f"### Creating upgrade file {newly_created_sql_file}... ###") - with open( - f"{distributed_dir_path}/{newly_created_sql_file}", - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as f_writer: - content = f"-- citus--{current_schema_version}--{upcoming_minor_version}-1" - content = content + "\n\n" - content = content + f"-- bump version to {upcoming_minor_version}-1" + "\n\n" - f_writer.write(content) - print(f"### Done {newly_created_sql_file} created. 
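# Illustrative sketch, not part of the patch: the migration file names the helpers above
# produce for a made-up bump from schema version 12.0-1 to minor version 12.1.
assert upgrade_sql_file_name("12.0-1", "12.1") == "citus--12.0-1--12.1-1.sql"
# create_new_sql_for_downgrade_path mirrors it in the downgrades directory:
#   citus--12.1-1--12.0-1.sql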
###") - return newly_created_sql_file - - -def create_new_sql_for_downgrade_path( - current_schema_version, distributed_dir_path, upcoming_minor_version -): - newly_created_sql_file = ( - f"citus--{upcoming_minor_version}-1--{current_schema_version}.sql" - ) - print(f"### Creating downgrade file {newly_created_sql_file}... ###") - with open( - f"{distributed_dir_path}/{newly_created_sql_file}", - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as f_writer: - content = f"-- citus--{upcoming_minor_version}-1--{current_schema_version}" - content = content + "\n" - content = ( - content + f"-- this is an empty downgrade path since " - f"{upgrade_sql_file_name(current_schema_version, upcoming_minor_version)} " - f"is empty for now" + "\n" - ) - f_writer.write(content) - print(f"### Done {newly_created_sql_file} created. ###") - return newly_created_sql_file - - -CHECKOUT_DIR = "citus_temp" - - -def validate_parameters(major_release_flag: bool): - if major_release_flag and arguments.cherry_pick_enabled: - raise ValueError("Cherry pick could be enabled only for patch release") - - if major_release_flag and arguments.earliest_pr_date: - raise ValueError("earliest_pr_date could not be used for major releases") - - if major_release_flag and arguments.schema_version: - raise ValueError("schema_version could not be set for major releases") - - if ( - not major_release_flag - and arguments.cherry_pick_enabled - and not arguments.earliest_pr_date - ): - raise ValueError( - "earliest_pr_date parameter could not be empty when cherry pick is enabled and release is major." - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--gh_token", required=True) - parser.add_argument( - "--prj_name", choices=["citus", "citus-enterprise"], required=True - ) - parser.add_argument("--prj_ver", required=True) - parser.add_argument("--main_branch") - parser.add_argument("--earliest_pr_date") - parser.add_argument("--cherry_pick_enabled", action="store_true") - parser.add_argument("--is_test", action="store_true") - parser.add_argument("--schema_version", nargs="?") - arguments = parser.parse_args() - execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" - major_release = is_major_release(arguments.prj_ver) - validate_parameters(major_release) - - try: - initialize_env(execution_path, arguments.prj_name, CHECKOUT_DIR) - - is_cherry_pick_enabled = arguments.cherry_pick_enabled - main_branch = ( - arguments.main_branch - if arguments.main_branch - else repo_details[arguments.prj_name]["branch"] - ) - print(f"Using main branch {main_branch} for the repo {arguments.prj_name}.") - os.chdir(execution_path) - print(f"Executing in path {execution_path}") - earliest_pr_date_value = ( - None - if major_release or not is_cherry_pick_enabled - else datetime.strptime(arguments.earliest_pr_date, "%Y.%m.%d") - ) - proj_params = ProjectParams( - project_name=arguments.prj_name, - project_version=arguments.prj_ver, - main_branch=main_branch, - schema_version=arguments.schema_version, - ) - update_release( - github_token=arguments.gh_token, - project_params=proj_params, - earliest_pr_date=earliest_pr_date_value, - is_test=arguments.is_test, - cherry_pick_enabled=arguments.cherry_pick_enabled, - exec_path=execution_path, - ) - finally: - if not arguments.is_test: - remove_cloned_code(execution_path) +import argparse +import os +import uuid +from dataclasses import dataclass +from datetime import datetime + +import pathlib2 +from github import Github, 
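# Illustrative sketch, not part of the patch: a typical invocation of the entry point above
# for a patch release; the argument values are placeholders. --earliest_pr_date is parsed
# with "%Y.%m.%d" and is only meaningful (together with --cherry_pick_enabled) for patch
# releases, as enforced by validate_parameters.
#
#   python -m packaging_automation.prepare_release \
#       --gh_token "$GH_TOKEN" --prj_name citus --prj_ver 12.1.3 \
#       --schema_version 12.1-1 --cherry_pick_enabled --earliest_pr_date 2024.04.01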
Repository +from parameters_validation import non_blank, non_empty +from typing import Dict + +from .common_tool_methods import ( + get_version_details, + is_major_release, + get_prs_for_patch_release, + filter_prs_by_label, + cherry_pick_prs, + run, + replace_line_in_file, + get_current_branch, + find_nth_matching_line_and_line_number, + get_patch_version_regex, + remote_branch_exists, + local_branch_exists, + prepend_line_in_file, + get_template_environment, + get_upcoming_minor_version, + remove_cloned_code, + initialize_env, + create_pr_with_repo, + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, +) +from .common_validations import CITUS_MINOR_VERSION_PATTERN, CITUS_PATCH_VERSION_PATTERN + +MULTI_EXTENSION_SQL = "src/test/regress/sql/multi_extension.sql" +CITUS_CONTROL = "src/backend/distributed/citus.control" +MULTI_EXTENSION_OUT = "src/test/regress/expected/multi_extension.out" +CONFIG_PY = "src/test/regress/upgrade/config.py" +DISTRIBUTED_SQL_DIR_PATH = "src/backend/distributed/sql" +DOWNGRADES_DIR_PATH = f"{DISTRIBUTED_SQL_DIR_PATH}/downgrades" +CONFIGURE_IN = "configure.in" +CONFIGURE = "configure" +CITUS_CONTROL_SEARCH_PATTERN = r"^default_version*" + +MULTI_EXT_DEVEL_SEARCH_PATTERN = rf"^\s*{CITUS_MINOR_VERSION_PATTERN}devel$" +MULTI_EXT_PATCH_SEARCH_PATTERN = rf"^\s*{CITUS_PATCH_VERSION_PATTERN}$" + +MULTI_EXT_DETAIL_PREFIX = r"DETAIL: Loaded library requires " +MULTI_EXT_DETAIL1_SUFFIX = r", but 8.0-1 was specified." +MULTI_EXT_DETAIL2_SUFFIX = r", but the installed extension version is 8.1-1." +MULTI_EXT_DETAIL1_PATTERN = ( + rf"^{MULTI_EXT_DETAIL_PREFIX}\d+\.\d+{MULTI_EXT_DETAIL1_SUFFIX}$" +) + +MULTI_EXT_DETAIL2_PATTERN = ( + rf"^{MULTI_EXT_DETAIL_PREFIX}\d+\.\d+{MULTI_EXT_DETAIL2_SUFFIX}$" +) + +CONFIG_PY_MASTER_VERSION_SEARCH_PATTERN = r"^MASTER_VERSION = '\d+\.\d+'" + +CONFIGURE_IN_SEARCH_PATTERN = "AC_INIT*" +REPO_OWNER = "citusdata" + +BASE_PATH = pathlib2.Path(__file__).parent.absolute() +TEMPLATES_PATH = f"{BASE_PATH}/templates" + +MULTI_EXT_OUT_TEMPLATE_FILE = "multi_extension_out_prepare_release.tmpl" +MULTI_EXT_SQL_TEMPLATE_FILE = "multi_extension_sql_prepare_release.tmpl" + +repo_details = { + "citus": {"configure-in-str": "Citus", "branch": "master"}, + "citus-enterprise": { + "configure-in-str": "Citus Enterprise", + "branch": "enterprise-master", + }, +} + + +@dataclass +class UpdateReleaseReturnValue: + release_branch_name: str + upcoming_version_branch: str + upgrade_path_sql_file: str + downgrade_path_sql_file: str + + +@dataclass +class MajorReleaseParams: + configure_in_path: str + devel_version: str + is_test: bool + main_branch: str + multi_extension_out_path: str + project_name: str + project_version: str + release_branch_name: str + + +@dataclass +class UpcomingVersionBranchParams: + citus_control_file_path: str + config_py_path: str + configure_in_path: str + upcoming_devel_version: str + distributed_dir_path: str + downgrades_dir_path: str + is_test: bool + main_branch: str + multi_extension_out_path: str + multi_extension_sql_path: str + project_name: str + project_version: str + repository: Repository + upcoming_minor_version: str + upcoming_version_branch: str + + +@dataclass +class PatchReleaseParams: + cherry_pick_enabled: bool + configure_in_path: str + earliest_pr_date_value: datetime + is_test: bool + main_branch: str + citus_control_file_path: str + multi_extension_out_path: str + project_name: str + project_version: str + release_branch_name: str + schema_version: str + repository: Repository + + +@dataclass +class 
ProjectParams: + project_name: str + project_version: str + main_branch: str + schema_version: str + + +@dataclass +class PathParams: + multi_extension_sql_path: str + citus_control_file_path: str + multi_extension_out_path: str + configure_in_path: str + config_py_path: str + distributed_dir_path: str + downgrades_dir_path: str + + +@dataclass +class BranchParams: + release_branch_name: str + upcoming_version_branch: str + + +@dataclass +class VersionParams: + project_version_details: Dict[str, str] + upcoming_minor_version: str + upcoming_devel_version: str + + +BASE_GIT_PATH = pathlib2.Path(__file__).parents[1] + + +@dataclass +class MigrationFiles: + upgrade_file: str + downgrade_file: str + + +# disabled since this is related to parameter_validations library methods +# pylint: disable=no-value-for-parameter +def update_release( + github_token: non_blank(non_empty(str)), + project_params: ProjectParams, + earliest_pr_date: datetime, + exec_path: non_blank(non_empty(str)), + is_test: bool = False, + cherry_pick_enabled: bool = False, +) -> UpdateReleaseReturnValue: + path_params = PathParams( + multi_extension_out_path=f"{exec_path}/{MULTI_EXTENSION_OUT}", + multi_extension_sql_path=f"{exec_path}/{MULTI_EXTENSION_SQL}", + citus_control_file_path=f"{exec_path}/{CITUS_CONTROL}", + configure_in_path=f"{exec_path}/{CONFIGURE_IN}", + config_py_path=f"{exec_path}/{CONFIG_PY}", + distributed_dir_path=f"{exec_path}/{DISTRIBUTED_SQL_DIR_PATH}", + downgrades_dir_path=f"{exec_path}/{DOWNGRADES_DIR_PATH}", + ) + + version_params = VersionParams( + project_version_details=get_version_details(project_params.project_version), + upcoming_minor_version=get_upcoming_minor_version( + project_params.project_version + ), + upcoming_devel_version=f"{get_upcoming_minor_version(project_params.project_version)}devel", + ) + + branch_params = BranchParams( + release_branch_name=get_release_branch_name( + is_test, version_params.project_version_details + ), + upcoming_version_branch=f"master-update-version-{uuid.uuid4()}", + ) + + repository = get_github_repository(github_token, project_params) + + upcoming_version_branch = "" + + migration_files = MigrationFiles("", "") + # major release + if is_major_release(project_params.project_version): + print( + f"### {project_params.project_version} is a major release. Executing Major release flow... 
###" + ) + major_release_params = MajorReleaseParams( + configure_in_path=path_params.configure_in_path, + devel_version=version_params.upcoming_devel_version, + is_test=is_test, + main_branch=project_params.main_branch, + multi_extension_out_path=path_params.multi_extension_out_path, + project_name=project_params.project_name, + project_version=project_params.project_version, + release_branch_name=branch_params.release_branch_name, + ) + prepare_release_branch_for_major_release(major_release_params) + upcoming_version_branch_params = UpcomingVersionBranchParams( + project_version=project_params.project_version, + project_name=project_params.project_name, + upcoming_version_branch=branch_params.upcoming_version_branch, + upcoming_devel_version=version_params.upcoming_devel_version, + is_test=is_test, + main_branch=project_params.main_branch, + citus_control_file_path=path_params.citus_control_file_path, + config_py_path=path_params.config_py_path, + configure_in_path=path_params.configure_in_path, + distributed_dir_path=path_params.distributed_dir_path, + downgrades_dir_path=path_params.downgrades_dir_path, + repository=repository, + upcoming_minor_version=version_params.upcoming_minor_version, + multi_extension_out_path=path_params.multi_extension_out_path, + multi_extension_sql_path=path_params.multi_extension_sql_path, + ) + upcoming_version_branch = upcoming_version_branch_params.upcoming_version_branch + + migration_files = prepare_upcoming_version_branch( + upcoming_version_branch_params + ) + print( + f"### Done {project_params.project_version} Major release flow executed successfully. ###" + ) + # patch release + else: + patch_release_params = PatchReleaseParams( + cherry_pick_enabled=cherry_pick_enabled, + configure_in_path=path_params.configure_in_path, + earliest_pr_date_value=earliest_pr_date, + is_test=is_test, + main_branch=project_params.main_branch, + multi_extension_out_path=path_params.multi_extension_out_path, + project_name=project_params.project_name, + project_version=project_params.project_version, + schema_version=project_params.schema_version, + citus_control_file_path=path_params.citus_control_file_path, + release_branch_name=branch_params.release_branch_name, + repository=repository, + ) + prepare_release_branch_for_patch_release(patch_release_params) + return UpdateReleaseReturnValue( + release_branch_name=branch_params.release_branch_name, + upcoming_version_branch=upcoming_version_branch, + upgrade_path_sql_file=f"{DISTRIBUTED_SQL_DIR_PATH}/{migration_files.upgrade_file}", + downgrade_path_sql_file=f"{DOWNGRADES_DIR_PATH}/{migration_files.downgrade_file}", + ) + + +def get_github_repository(github_token, project_params): + g = Github(github_token) + repository = g.get_repo(f"{REPO_OWNER}/{project_params.project_name}") + return repository + + +def get_release_branch_name(is_test, project_version_details): + release_branch_name = ( + f'release-{project_version_details["major"]}.{project_version_details["minor"]}' + ) + release_branch_name = ( + f"{release_branch_name}-test" if is_test else release_branch_name + ) + return release_branch_name + + +def prepare_release_branch_for_patch_release(patchReleaseParams: PatchReleaseParams): + print( + f"### {patchReleaseParams.project_version} is a patch release. Executing Patch release flow... ###" + ) + # checkout release branch (release-X.Y) In test case release branch for test may not be exist. 
+ # In this case create one + if patchReleaseParams.is_test: + non_test_release_branch = patchReleaseParams.release_branch_name.rstrip("-test") + release_branch_exist = remote_branch_exists( + non_test_release_branch, os.getcwd() + ) + test_release_branch_exist = local_branch_exists( + patchReleaseParams.release_branch_name, os.getcwd() + ) + + if release_branch_exist: + run(f"git checkout {non_test_release_branch}") + run(f"git checkout -b {patchReleaseParams.release_branch_name}") + elif test_release_branch_exist: + run(f"git checkout {patchReleaseParams.release_branch_name}") + else: + run(f"git checkout -b {patchReleaseParams.release_branch_name}") + else: + checkout_branch( + patchReleaseParams.release_branch_name, patchReleaseParams.is_test + ) + # change version info in configure.in file + update_version_in_configure_in( + patchReleaseParams.project_name, + patchReleaseParams.configure_in_path, + patchReleaseParams.project_version, + ) + # execute "auto-conf " + execute_autoconf_f() + # change version info in multi_extension.out + update_version_in_multi_extension_out_for_patch( + patchReleaseParams.multi_extension_out_path, patchReleaseParams.project_version + ) + # if schema version is not empty update citus.control schema version + if patchReleaseParams.schema_version: + update_schema_version_in_citus_control( + citus_control_file_path=patchReleaseParams.citus_control_file_path, + schema_version=patchReleaseParams.schema_version, + ) + if patchReleaseParams.cherry_pick_enabled: + # cherry-pick the pr's with backport labels + cherrypick_prs_with_backport_labels( + patchReleaseParams.earliest_pr_date_value, + patchReleaseParams.main_branch, + patchReleaseParams.release_branch_name, + patchReleaseParams.repository, + ) + # commit all changes + commit_changes_for_version_bump( + patchReleaseParams.project_name, patchReleaseParams.project_version + ) + # create and push release-$minor_version-push-$curTime branch + release_pr_branch = f"{patchReleaseParams.release_branch_name}_{uuid.uuid4()}" + create_and_checkout_branch(release_pr_branch) + if not patchReleaseParams.is_test: + push_branch(release_pr_branch) + + print("### Done Patch release flow executed successfully. ###") + + +def prepare_upcoming_version_branch(upcoming_params: UpcomingVersionBranchParams): + print( + f"### {upcoming_params.upcoming_version_branch} flow is being executed... 
###" + ) + # checkout master + checkout_branch(upcoming_params.main_branch, upcoming_params.is_test) + # create master-update-version-$curtime branch + create_and_checkout_branch(upcoming_params.upcoming_version_branch) + # update version info with upcoming version on configure.in + update_version_in_configure_in( + upcoming_params.project_name, + upcoming_params.configure_in_path, + upcoming_params.upcoming_devel_version, + ) + # update version info with upcoming version on config.py + update_version_with_upcoming_version_in_config_py( + upcoming_params.config_py_path, upcoming_params.upcoming_minor_version + ) + # execute autoconf -f + execute_autoconf_f() + # update version info with upcoming version on multi_extension.out + update_version_in_multi_extension_out( + upcoming_params.multi_extension_out_path, upcoming_params.upcoming_devel_version + ) + # update detail lines with minor version + update_detail_strings_in_multi_extension_out( + upcoming_params.multi_extension_out_path, upcoming_params.upcoming_minor_version + ) + # get current schema version from citus.control + current_schema_version = get_current_schema_from_citus_control( + upcoming_params.citus_control_file_path + ) + # add downgrade script in multi_extension.sql file + add_downgrade_script_in_multi_extension_file( + current_schema_version, + upcoming_params.multi_extension_sql_path, + upcoming_params.upcoming_minor_version, + MULTI_EXT_SQL_TEMPLATE_FILE, + ) + # add downgrade script in multi_extension.out file + add_downgrade_script_in_multi_extension_file( + current_schema_version, + upcoming_params.multi_extension_out_path, + upcoming_params.upcoming_minor_version, + MULTI_EXT_OUT_TEMPLATE_FILE, + ) + # create a new sql file for upgrade path: + upgrade_file = create_new_sql_for_upgrade_path( + current_schema_version, + upcoming_params.distributed_dir_path, + upcoming_params.upcoming_minor_version, + ) + # create a new sql file for downgrade path: + downgrade_file = create_new_sql_for_downgrade_path( + current_schema_version, + upcoming_params.downgrades_dir_path, + upcoming_params.upcoming_minor_version, + ) + + # change version in citus.control file + default_upcoming_schema_version = f"{upcoming_params.upcoming_minor_version}-1" + update_schema_version_in_citus_control( + upcoming_params.citus_control_file_path, default_upcoming_schema_version + ) + # commit and push changes on master-update-version-$curtime branch + commit_changes_for_version_bump( + upcoming_params.project_name, upcoming_params.upcoming_devel_version + ) + if not upcoming_params.is_test: + push_branch(upcoming_params.upcoming_version_branch) + + # create pull request + create_pull_request_for_upcoming_version_branch( + upcoming_params.repository, + upcoming_params.main_branch, + upcoming_params.upcoming_version_branch, + upcoming_params.upcoming_devel_version, + ) + print(f"### Done {upcoming_params.upcoming_version_branch} flow executed. ###") + return MigrationFiles(upgrade_file=upgrade_file, downgrade_file=downgrade_file) + + +def prepare_release_branch_for_major_release(majorReleaseParams: MajorReleaseParams): + print( + f"### {majorReleaseParams.release_branch_name} release branch flow is being executed... 
###" + ) + # checkout master + checkout_branch(majorReleaseParams.main_branch, majorReleaseParams.is_test) + # create release branch in release-X.Y format + create_and_checkout_branch(majorReleaseParams.release_branch_name) + # change version info in configure.in file + update_version_in_configure_in( + majorReleaseParams.project_name, + majorReleaseParams.configure_in_path, + majorReleaseParams.project_version, + ) + # execute "autoconf -f" + execute_autoconf_f() + # change version info in multi_extension.out + update_version_in_multi_extension_out( + majorReleaseParams.multi_extension_out_path, majorReleaseParams.project_version + ) + # commit all changes + commit_changes_for_version_bump( + majorReleaseParams.project_name, majorReleaseParams.project_version + ) + # push release branch (No PR creation!!!) + if not majorReleaseParams.is_test: + push_branch(majorReleaseParams.release_branch_name) + print( + f"### Done {majorReleaseParams.release_branch_name} release branch flow executed .###" + ) + + +def cherrypick_prs_with_backport_labels( + earliest_pr_date, main_branch, release_branch_name, repository +): + print( + f"### Getting all PR with backport label after {datetime.strftime(earliest_pr_date, '%Y.%m.%d %H:%M')}... ### " + ) + prs_with_earliest_date = get_prs_for_patch_release( + repository, earliest_pr_date, main_branch + ) + # get commits for selected prs with backport label + prs_with_backport = filter_prs_by_label(prs_with_earliest_date, "backport") + print( + f"### Done {len(prs_with_backport)} PRs with backport label found. PR list is as below. ###" + ) + for pr in prs_with_backport: + print(f"\tNo:{pr.number} Title:{pr.title}") + # cherrypick all commits with backport label + print(f"### Cherry-picking PRs to {release_branch_name}... ###") + cherry_pick_prs(prs_with_backport) + print( + f"### Done Cherry pick completed for all PRs on branch {release_branch_name}. ###" + ) + + +def create_pull_request_for_upcoming_version_branch( + repository, main_branch, upcoming_version_branch, upcoming_version +): + print(f"### Creating pull request for {upcoming_version_branch}... ###") + pr_result = create_pr_with_repo( + repo=repository, + pr_branch=upcoming_version_branch, + pr_title=f"Bump Citus to {upcoming_version}", + base_branch=main_branch, + ) + print( + f"### Done Pull request created. PR no:{pr_result.number} PR URL: {pr_result.url}. ### " + ) + + +def push_branch(upcoming_version_branch): + print(f"Pushing changes for {upcoming_version_branch} into remote origin... ###") + run(f"git push --set-upstream origin {upcoming_version_branch}") + print(f"### Done Changes pushed for {upcoming_version_branch}. ###") + + +def commit_changes_for_version_bump(project_name, project_version): + current_branch = get_current_branch(os.getcwd()) + print(f"### Committing changes for branch {current_branch}... ###") + run("git add .") + run(f' git commit -m "Bump {project_name} version to {project_version} "') + print(f"### Done Changes committed for {current_branch}. ###") + + +def update_schema_version_in_citus_control(citus_control_file_path, schema_version): + print( + f"### Updating {citus_control_file_path} file with the version {schema_version}... ###" + ) + if not replace_line_in_file( + citus_control_file_path, + CITUS_CONTROL_SEARCH_PATTERN, + f"default_version = '{schema_version}'", + ): + raise ValueError(f"{citus_control_file_path} does not have match for version") + print( + f"### Done {citus_control_file_path} file is updated with the schema version {schema_version}. 
###" + ) + + +def add_downgrade_script_in_multi_extension_file( + current_schema_version, + multi_extension_out_path, + upcoming_minor_version, + template_file: str, +): + print( + f"### Adding downgrade scripts from version {current_schema_version} to " + f"{upcoming_minor_version} on {multi_extension_out_path}... ### " + ) + env = get_template_environment(TEMPLATES_PATH) + template = env.get_template( + template_file + ) # multi_extension_out_prepare_release.tmpl multi_extension_sql_prepare_release.tmpl + string_to_prepend = f"{template.render(current_schema_version=current_schema_version, upcoming_minor_version=f'{upcoming_minor_version}-1')}\n" + + if not prepend_line_in_file( + multi_extension_out_path, + "DROP TABLE prev_objects, extension_diff;", + string_to_prepend, + ): + raise ValueError( + f"Downgrade scripts could not be added in {multi_extension_out_path} since " + f"'DROP TABLE prev_objects, extension_diff;' script could not be found " + ) + print( + f"### Done Test downgrade scripts successfully added in {multi_extension_out_path}. ###" + ) + + +def get_current_schema_from_citus_control(citus_control_file_path: str) -> str: + print(f"### Reading current schema version from {citus_control_file_path}... ###") + current_schema_version = "" + with open( + citus_control_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as cc_reader: + cc_file_content = cc_reader.read() + _, cc_line = find_nth_matching_line_and_line_number( + cc_file_content, CITUS_CONTROL_SEARCH_PATTERN, 1 + ) + schema_not_found = False + if len(cc_line) > 0: + line_parts = cc_line.split("=") + if len(line_parts) == 2: + current_schema_version = line_parts[1] + else: + schema_not_found = True + else: + schema_not_found = True + + if schema_not_found: + raise ValueError("Version info could not be found in citus.control file") + + current_schema_version = current_schema_version.strip(" '") + print(f"### Done Schema version is {current_schema_version}. ###") + return current_schema_version + + +def update_version_with_upcoming_version_in_config_py( + config_py_path, upcoming_minor_version +): + print( + f"### Updating {config_py_path} file with the upcoming version {upcoming_minor_version}... ###" + ) + if not replace_line_in_file( + config_py_path, + CONFIG_PY_MASTER_VERSION_SEARCH_PATTERN, + f"MASTER_VERSION = '{upcoming_minor_version}'", + ): + raise ValueError(f"{config_py_path} does not have match for version") + print( + f"### Done {config_py_path} file updated with the upcoming version {upcoming_minor_version}. ###" + ) + + +def update_version_in_multi_extension_out(multi_extension_out_path, project_version): + print( + f"### Updating {multi_extension_out_path} file with the project version {project_version}... ###" + ) + + if not replace_line_in_file( + multi_extension_out_path, MULTI_EXT_DEVEL_SEARCH_PATTERN, f" {project_version}" + ): + raise ValueError( + f"{multi_extension_out_path} does not contain the version with pattern {MULTI_EXT_DEVEL_SEARCH_PATTERN}" + ) + print( + f"### Done {multi_extension_out_path} file is updated with project version {project_version}. ###" + ) + + +def update_detail_strings_in_multi_extension_out( + multi_extension_out_path, minor_version +): + print( + f"### Updating {multi_extension_out_path} detail lines file with the project version {minor_version}... 
###" + ) + + if not replace_line_in_file( + multi_extension_out_path, + MULTI_EXT_DETAIL1_PATTERN, + f"{MULTI_EXT_DETAIL_PREFIX}{minor_version}{MULTI_EXT_DETAIL1_SUFFIX}", + ): + raise ValueError( + f"{multi_extension_out_path} does not contain the version with pattern {MULTI_EXT_DETAIL1_PATTERN}" + ) + + if not replace_line_in_file( + multi_extension_out_path, + MULTI_EXT_DETAIL2_PATTERN, + f"{MULTI_EXT_DETAIL_PREFIX}{minor_version}{MULTI_EXT_DETAIL2_SUFFIX}", + ): + raise ValueError( + f"{multi_extension_out_path} does not contain the version with pattern {MULTI_EXT_DETAIL2_PATTERN}" + ) + + print( + f"### Done {multi_extension_out_path} detail lines updated with project version {minor_version}. ###" + ) + + +def update_version_in_multi_extension_out_for_patch( + multi_extension_out_path, project_version +): + print( + f"### Updating {multi_extension_out_path} file with the project version {project_version}... ###" + ) + + if not replace_line_in_file( + multi_extension_out_path, + get_patch_version_regex(project_version), + f" {project_version}", + ): + raise ValueError( + f"{multi_extension_out_path} does not contain the version with pattern {get_patch_version_regex(project_version)}" + ) + print( + f"### Done {multi_extension_out_path} file is updated with project version {project_version}. ###" + ) + + +def execute_autoconf_f(): + print("### Executing autoconf -f command... ###") + run("autoconf -f") + print("### Done autoconf -f executed. ###") + + +def update_version_in_configure_in(project_name, configure_in_path, project_version): + print(f"### Updating version on file {configure_in_path}... ###") + if not replace_line_in_file( + configure_in_path, + CONFIGURE_IN_SEARCH_PATTERN, + f"AC_INIT([{repo_details[project_name]['configure-in-str']}], [{project_version}])", + ): + raise ValueError(f"{configure_in_path} does not have match for version") + print( + f"### Done {configure_in_path} file is updated with project version {project_version}. ###" + ) + + +def create_and_checkout_branch(release_branch_name): + print( + f"### Creating release branch with name {release_branch_name} from {get_current_branch(os.getcwd())}... ###" + ) + run(f"git checkout -b {release_branch_name}") + print(f"### Done {release_branch_name} created. ###") + + +def checkout_branch(branch_name, is_test): + print(f"### Checking out {branch_name}... ###") + run(f"git checkout {branch_name}") + if not is_test: + run("git pull") + + print(f"### Done {branch_name} checked out and pulled. ###") + + +def upgrade_sql_file_name(current_schema_version, upcoming_minor_version): + return f"citus--{current_schema_version}--{upcoming_minor_version}-1.sql" + + +def create_new_sql_for_upgrade_path( + current_schema_version, distributed_dir_path, upcoming_minor_version +): + newly_created_sql_file = upgrade_sql_file_name( + current_schema_version, upcoming_minor_version + ) + print(f"### Creating upgrade file {newly_created_sql_file}... ###") + with open( + f"{distributed_dir_path}/{newly_created_sql_file}", + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as f_writer: + content = f"-- citus--{current_schema_version}--{upcoming_minor_version}-1" + content = content + "\n\n" + content = content + f"-- bump version to {upcoming_minor_version}-1" + "\n\n" + f_writer.write(content) + print(f"### Done {newly_created_sql_file} created. 
###") + return newly_created_sql_file + + +def create_new_sql_for_downgrade_path( + current_schema_version, distributed_dir_path, upcoming_minor_version +): + newly_created_sql_file = ( + f"citus--{upcoming_minor_version}-1--{current_schema_version}.sql" + ) + print(f"### Creating downgrade file {newly_created_sql_file}... ###") + with open( + f"{distributed_dir_path}/{newly_created_sql_file}", + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as f_writer: + content = f"-- citus--{upcoming_minor_version}-1--{current_schema_version}" + content = content + "\n" + content = ( + content + f"-- this is an empty downgrade path since " + f"{upgrade_sql_file_name(current_schema_version, upcoming_minor_version)} " + f"is empty for now" + "\n" + ) + f_writer.write(content) + print(f"### Done {newly_created_sql_file} created. ###") + return newly_created_sql_file + + +CHECKOUT_DIR = "citus_temp" + + +def validate_parameters(major_release_flag: bool): + if major_release_flag and arguments.cherry_pick_enabled: + raise ValueError("Cherry pick could be enabled only for patch release") + + if major_release_flag and arguments.earliest_pr_date: + raise ValueError("earliest_pr_date could not be used for major releases") + + if major_release_flag and arguments.schema_version: + raise ValueError("schema_version could not be set for major releases") + + if ( + not major_release_flag + and arguments.cherry_pick_enabled + and not arguments.earliest_pr_date + ): + raise ValueError( + "earliest_pr_date parameter could not be empty when cherry pick is enabled and release is major." + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--gh_token", required=True) + parser.add_argument( + "--prj_name", choices=["citus", "citus-enterprise"], required=True + ) + parser.add_argument("--prj_ver", required=True) + parser.add_argument("--main_branch") + parser.add_argument("--earliest_pr_date") + parser.add_argument("--cherry_pick_enabled", action="store_true") + parser.add_argument("--is_test", action="store_true") + parser.add_argument("--schema_version", nargs="?") + arguments = parser.parse_args() + execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" + major_release = is_major_release(arguments.prj_ver) + validate_parameters(major_release) + + try: + initialize_env(execution_path, arguments.prj_name, CHECKOUT_DIR) + + is_cherry_pick_enabled = arguments.cherry_pick_enabled + main_branch = ( + arguments.main_branch + if arguments.main_branch + else repo_details[arguments.prj_name]["branch"] + ) + print(f"Using main branch {main_branch} for the repo {arguments.prj_name}.") + os.chdir(execution_path) + print(f"Executing in path {execution_path}") + earliest_pr_date_value = ( + None + if major_release or not is_cherry_pick_enabled + else datetime.strptime(arguments.earliest_pr_date, "%Y.%m.%d") + ) + proj_params = ProjectParams( + project_name=arguments.prj_name, + project_version=arguments.prj_ver, + main_branch=main_branch, + schema_version=arguments.schema_version, + ) + update_release( + github_token=arguments.gh_token, + project_params=proj_params, + earliest_pr_date=earliest_pr_date_value, + is_test=arguments.is_test, + cherry_pick_enabled=arguments.cherry_pick_enabled, + exec_path=execution_path, + ) + finally: + if not arguments.is_test: + remove_cloned_code(execution_path) diff --git a/packaging_automation/publish-into-ms-packages.py b/packaging_automation/publish-into-ms-packages.py index ae8f4562..0e243a62 100644 --- 
a/packaging_automation/publish-into-ms-packages.py +++ b/packaging_automation/publish-into-ms-packages.py @@ -1,141 +1,141 @@ -import argparse -import json -import os -import re -import time - -from .common_tool_methods import run_with_output, str_array_to_str - -ms_package_repo_map = { - "el/8": "centos-8", - "el/7": "centos-7", - "debian/buster": "debian-buster", - "debian/jessie": "debian-jessie", - "debian/stretch": "debian-stretch", - "ubuntu/bionic": "ubuntu-bionic", - "ubuntu/focal": "ubuntu-focal", -} - -# Ubuntu focal repo id is not returned from repoclient list so we had to add this repo manually -UBUNTU_FOCAL_REPO_ID = "6009d702435efdb9f7acd170" -DEB_BASED_REPOS = ["citus-ubuntu", "citus-debian"] -AMD_SUFFIX = "amd64.deb" - - -def publish_single_package(package_path: str, repo): - result = run_with_output( - f"repoclient package add --repoID {repo['id']} {package_path}" - ) - - return json.loads(result.stdout) - - -def get_citus_repos(): - repo_list = run_with_output("repoclient repo list") - - all_repos = json.loads(repo_list.stdout) - - repos = {} - for repo in all_repos: - if not repo["url"].startswith("citus-"): - continue - name = repo["url"][len("citus-") :] - if name in ("ubuntu", "debian"): - # Suffix distribution - name = name + "-" + repo["distribution"] - else: - # Put dash before number - name = re.sub(r"(\d+)", r"-\1", name) - repos[name] = repo - # Adding ubuntu-focal manually because list does not include ubuntu-focal - repos["ubuntu-focal"] = { - "url": "ubuntu-focal", - "distribution": "focal", - "id": UBUNTU_FOCAL_REPO_ID, - } - return repos - - -# Ensure deb packages contain the distribution, so they do not conflict -def suffix_deb_package_with_distribution(repository, package_file_path): - if not package_file_path.endswith(AMD_SUFFIX): - raise ValueError( - f"Package should have ended with {AMD_SUFFIX}: {package_file_path}" - ) - old_package_path = package_file_path - package_prefix = package_file_path[: -len(AMD_SUFFIX)] - package_file_path = f"{package_prefix}+{repository['distribution']}_{AMD_SUFFIX}" - os.rename(old_package_path, package_file_path) - return package_file_path - - -def publish_packages(target_platform, citus_repos, packages_dir: str): - responses = {} - for package_file in os.listdir(packages_dir): - print(f"Target Platform is {target_platform}") - repo_platform = ms_package_repo_map[target_platform] - repo = citus_repos[repo_platform] - package_path = os.path.join(packages_dir, package_file) - - # Ensure deb packages contain the distribution, so they do not conflict - if repo["url"] in DEB_BASED_REPOS: - if repo["distribution"] not in package_file: - package_path = suffix_deb_package_with_distribution(repo, package_path) - - # Publish packages - if os.path.isfile(package_path) and package_file.endswith((".rpm", ".deb")): - publish_result = publish_single_package(package_path, repo) - responses[package_path] = publish_result - print( - "Waiting for 30 seconds to avoid concurrency problems on publishing server" - ) - time.sleep(30) - - return responses - - -def check_submissions(all_responses): - # Check 15 times if there are any packages that we couldn't publish - unfinished_submissions = all_responses.copy() - finished_submissions = {} - for i in range(15): - for pack_path, response in all_responses.items(): - package_id = response["Location"].split("/")[-1] - try: - run_with_output(f"repoclient package check {package_id}") - finished_submissions[pack_path] = response - del unfinished_submissions[pack_path] - except ValueError: - 
print(pack_path, "was not published yet") - - if not unfinished_submissions: - break - time.sleep(i) - - if finished_submissions: - print( - f"The following packages were published successfuly:\n" - f"{str_array_to_str([os.path.basename(s) for s in finished_submissions])}\n" - ) - - if unfinished_submissions: - print( - f"The following packages were not published successfuly:\n" - f"{str_array_to_str([os.path.basename(s) for s in unfinished_submissions])}\n" - ) - raise ValueError("Some packages were not finished publishing") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--platform", choices=ms_package_repo_map.keys()) - parser.add_argument("--packages_dir", required=True) - args = parser.parse_args() - - citus_repos = get_citus_repos() - - submission_responses = publish_packages( - args.platform, citus_repos, args.packages_dir - ) - - check_submissions(submission_responses) +import argparse +import json +import os +import re +import time + +from .common_tool_methods import run_with_output, str_array_to_str + +ms_package_repo_map = { + "el/8": "centos-8", + "el/7": "centos-7", + "debian/buster": "debian-buster", + "debian/jessie": "debian-jessie", + "debian/stretch": "debian-stretch", + "ubuntu/bionic": "ubuntu-bionic", + "ubuntu/focal": "ubuntu-focal", +} + +# Ubuntu focal repo id is not returned from repoclient list so we had to add this repo manually +UBUNTU_FOCAL_REPO_ID = "6009d702435efdb9f7acd170" +DEB_BASED_REPOS = ["citus-ubuntu", "citus-debian"] +AMD_SUFFIX = "amd64.deb" + + +def publish_single_package(package_path: str, repo): + result = run_with_output( + f"repoclient package add --repoID {repo['id']} {package_path}" + ) + + return json.loads(result.stdout) + + +def get_citus_repos(): + repo_list = run_with_output("repoclient repo list") + + all_repos = json.loads(repo_list.stdout) + + repos = {} + for repo in all_repos: + if not repo["url"].startswith("citus-"): + continue + name = repo["url"][len("citus-") :] + if name in ("ubuntu", "debian"): + # Suffix distribution + name = name + "-" + repo["distribution"] + else: + # Put dash before number + name = re.sub(r"(\d+)", r"-\1", name) + repos[name] = repo + # Adding ubuntu-focal manually because list does not include ubuntu-focal + repos["ubuntu-focal"] = { + "url": "ubuntu-focal", + "distribution": "focal", + "id": UBUNTU_FOCAL_REPO_ID, + } + return repos + + +# Ensure deb packages contain the distribution, so they do not conflict +def suffix_deb_package_with_distribution(repository, package_file_path): + if not package_file_path.endswith(AMD_SUFFIX): + raise ValueError( + f"Package should have ended with {AMD_SUFFIX}: {package_file_path}" + ) + old_package_path = package_file_path + package_prefix = package_file_path[: -len(AMD_SUFFIX)] + package_file_path = f"{package_prefix}+{repository['distribution']}_{AMD_SUFFIX}" + os.rename(old_package_path, package_file_path) + return package_file_path + + +def publish_packages(target_platform, citus_repos, packages_dir: str): + responses = {} + for package_file in os.listdir(packages_dir): + print(f"Target Platform is {target_platform}") + repo_platform = ms_package_repo_map[target_platform] + repo = citus_repos[repo_platform] + package_path = os.path.join(packages_dir, package_file) + + # Ensure deb packages contain the distribution, so they do not conflict + if repo["url"] in DEB_BASED_REPOS: + if repo["distribution"] not in package_file: + package_path = suffix_deb_package_with_distribution(repo, package_path) + + # Publish packages + 
if os.path.isfile(package_path) and package_file.endswith((".rpm", ".deb")): + publish_result = publish_single_package(package_path, repo) + responses[package_path] = publish_result + print( + "Waiting for 30 seconds to avoid concurrency problems on publishing server" + ) + time.sleep(30) + + return responses + + +def check_submissions(all_responses): + # Check 15 times if there are any packages that we couldn't publish + unfinished_submissions = all_responses.copy() + finished_submissions = {} + for i in range(15): + for pack_path, response in all_responses.items(): + package_id = response["Location"].split("/")[-1] + try: + run_with_output(f"repoclient package check {package_id}") + finished_submissions[pack_path] = response + del unfinished_submissions[pack_path] + except ValueError: + print(pack_path, "was not published yet") + + if not unfinished_submissions: + break + time.sleep(i) + + if finished_submissions: + print( + f"The following packages were published successfuly:\n" + f"{str_array_to_str([os.path.basename(s) for s in finished_submissions])}\n" + ) + + if unfinished_submissions: + print( + f"The following packages were not published successfuly:\n" + f"{str_array_to_str([os.path.basename(s) for s in unfinished_submissions])}\n" + ) + raise ValueError("Some packages were not finished publishing") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--platform", choices=ms_package_repo_map.keys()) + parser.add_argument("--packages_dir", required=True) + args = parser.parse_args() + + citus_repos = get_citus_repos() + + submission_responses = publish_packages( + args.platform, citus_repos, args.packages_dir + ) + + check_submissions(submission_responses) diff --git a/packaging_automation/publish_docker.py b/packaging_automation/publish_docker.py index 2f9ff8fd..840cb1f1 100644 --- a/packaging_automation/publish_docker.py +++ b/packaging_automation/publish_docker.py @@ -1,349 +1,349 @@ -import argparse -import os -from enum import Enum -from typing import Tuple, List - -import docker -import pathlib2 -from parameters_validation import validate_parameters - -from .common_tool_methods import remove_prefix, get_current_branch, is_tag_on_branch -from .common_validations import is_tag - -BASE_PATH = pathlib2.Path(__file__).parents[1] - - -class GithubPipelineTriggerType(Enum): - push = 1 - schedule = 2 - workflow_dispatch = 3 - - -class GithubTriggerEventSource(Enum): - branch_push = 1 - tag_push = 2 - - -class DockerImageType(Enum): - latest = 1 - alpine = 2 - nightly = 3 - postgres_14 = 4 - postgres_15 = 5 - - -class ManualTriggerType(Enum): - main = 1 - tags = 2 - nightly = 3 - - -class ScheduleType(Enum): - regular = 1 - nightly = 2 - - -DEFAULT_BRANCH_NAME = "master" -docker_image_info_dict = { - DockerImageType.latest: { - "file-name": "Dockerfile", - "docker-tag": "latest", - "schedule-type": ScheduleType.regular, - }, - DockerImageType.alpine: { - "file-name": "alpine/Dockerfile", - "docker-tag": "alpine", - "schedule-type": ScheduleType.regular, - }, - DockerImageType.postgres_14: { - "file-name": "postgres-14/Dockerfile", - "docker-tag": "pg14", - "schedule-type": ScheduleType.regular, - }, - DockerImageType.postgres_15: { - "file-name": "postgres-15/Dockerfile", - "docker-tag": "pg15", - "schedule-type": ScheduleType.regular, - }, - DockerImageType.nightly: { - "file-name": "nightly/Dockerfile", - "docker-tag": "nightly", - "schedule-type": ScheduleType.nightly, - }, -} -DOCKER_IMAGE_NAME = "citusdata/citus" - -docker_client = 
docker.from_env() - -docker_api_client = docker.APIClient() - - -def regular_images_to_be_built( - docker_image_type: DockerImageType = None, -) -> List[DockerImageType]: - if docker_image_type: - return [docker_image_type] - return [ - key - for key, value in docker_image_info_dict.items() - if value["schedule-type"] == ScheduleType.regular - ] - - -# When pipeline triggered, if the event source is -# triggered by branch push or a schedule on pipeline, github_ref format is : refs/heads/{branch_name} -# if tiggered by tag push, github_ref format is: refs/heads/{tag_name} -def decode_triggering_event_info( - github_ref: str, -) -> Tuple[GithubTriggerEventSource, str]: - parts = github_ref.split("/") - if len(parts) != 3 or parts[1] not in ("tags", "heads"): - raise ValueError( - "github ref should be like one of the following two formats: " - "refs/heads/{branch_name}, refs/tags/{tag_name}" - ) - if parts[1] == "tags": - return GithubTriggerEventSource.tag_push, parts[2] - return GithubTriggerEventSource.branch_push, parts[2] - - -@validate_parameters -def decode_tag_parts(tag_name: is_tag(str)) -> List[str]: - return remove_prefix(tag_name, "v").split(".") - - -def get_image_tag(tag_prefix: str, docker_image_type: DockerImageType) -> str: - tag_suffix = ( - "" - if docker_image_type == DockerImageType.latest - else f"-{docker_image_info_dict[docker_image_type]['docker-tag']}" - ) - return f"{tag_prefix}{tag_suffix}" - - -def publish_docker_image_on_push( - docker_image_type: DockerImageType, github_ref: str, will_image_be_published: bool -): - triggering_event_info, resource_name = decode_triggering_event_info(github_ref) - for regular_image_type in regular_images_to_be_built(docker_image_type): - if triggering_event_info == GithubTriggerEventSource.branch_push: - publish_main_docker_images(regular_image_type, will_image_be_published) - else: - publish_tagged_docker_images( - regular_image_type, resource_name, will_image_be_published - ) - - -def publish_docker_image_on_schedule( - docker_image_type: DockerImageType, will_image_be_published: bool -): - if docker_image_type == DockerImageType.nightly: - publish_nightly_docker_image(will_image_be_published) - else: - for regular_image_type in regular_images_to_be_built(docker_image_type): - publish_main_docker_images(regular_image_type, will_image_be_published) - - -def publish_docker_image_manually( - manual_trigger_type_param: ManualTriggerType, - will_image_be_published: bool, - docker_image_type: DockerImageType, - tag_name: str = "", -) -> None: - if manual_trigger_type_param == ManualTriggerType.main and not tag_name: - for it in regular_images_to_be_built(docker_image_type): - publish_main_docker_images(it, will_image_be_published) - elif manual_trigger_type_param == ManualTriggerType.tags and tag_name: - for it in regular_images_to_be_built(docker_image_type): - publish_tagged_docker_images(it, tag_name, will_image_be_published) - elif manual_trigger_type_param == ManualTriggerType.nightly: - publish_nightly_docker_image(will_image_be_published) - - -def publish_main_docker_images( - docker_image_type: DockerImageType, will_image_be_published: bool -): - print(f"Building main docker image for {docker_image_type.name}...") - docker_image_name = f"{DOCKER_IMAGE_NAME}:{docker_image_type.name}" - _, logs = docker_client.images.build( - dockerfile=docker_image_info_dict[docker_image_type]["file-name"], - tag=docker_image_name, - path=".", - ) - flush_logs(logs) - print(f"Main docker image for {docker_image_type.name} built.") - if 
will_image_be_published: - print(f"Publishing main docker image for {docker_image_type.name}...") - docker_client.images.push(DOCKER_IMAGE_NAME, tag=docker_image_type.name) - print(f"Publishing main docker image for {docker_image_type.name} finished") - else: - current_branch = get_current_branch(os.getcwd()) - if current_branch != DEFAULT_BRANCH_NAME: - print( - f"Since current branch {current_branch} is not equal to " - f"{DEFAULT_BRANCH_NAME} {docker_image_name} will not be pushed." - ) - - -def flush_logs(logs): - for log in logs: - log_str = log.get("stream") - if log_str: - print(log_str, end="") - - -def publish_tagged_docker_images( - docker_image_type, tag_name: str, will_image_be_published: bool -): - print( - f"Building and publishing tagged image {docker_image_type.name} for tag {tag_name}..." - ) - tag_parts = decode_tag_parts(tag_name) - tag_version_part = "" - docker_image_name = f"{DOCKER_IMAGE_NAME}:{docker_image_type.name}" - _, logs = docker_client.images.build( - dockerfile=docker_image_info_dict[docker_image_type]["file-name"], - tag=docker_image_name, - path=".", - ) - flush_logs(logs) - print(f"{docker_image_type.name} image built.Now starting tagging and pushing...") - for tag_part in tag_parts: - tag_version_part = tag_version_part + tag_part - image_tag = get_image_tag(tag_version_part, docker_image_type) - print(f"Tagging {docker_image_name} with the tag {image_tag}...") - docker_api_client.tag(docker_image_name, docker_image_name, image_tag) - print(f"Tagging {docker_image_name} with the tag {image_tag} finished.") - if will_image_be_published: - print(f"Pushing {docker_image_name} with the tag {image_tag}...") - push_logs = docker_client.images.push(DOCKER_IMAGE_NAME, tag=image_tag) - print("Push logs:") - print(push_logs) - print(f"Pushing {docker_image_name} with the tag {image_tag} finished") - else: - print( - f"Skipped pushing {docker_image_type} with the tag {image_tag} since will_image_be_published flag is false" - ) - - tag_version_part = tag_version_part + "." - print( - f"Building and publishing tagged image {docker_image_type.name} for tag {tag_name} finished." - ) - - -def publish_nightly_docker_image(will_image_be_published: bool): - print("Building nightly image...") - docker_image_name = f"{DOCKER_IMAGE_NAME}:{docker_image_info_dict[DockerImageType.nightly]['docker-tag']}" - _, logs = docker_client.images.build( - dockerfile=docker_image_info_dict[DockerImageType.nightly]["file-name"], - tag=docker_image_name, - path=".", - ) - flush_logs(logs) - print("Nightly image build finished.") - - if will_image_be_published: - print("Pushing nightly image...") - docker_client.images.push( - DOCKER_IMAGE_NAME, - tag=docker_image_info_dict[DockerImageType.nightly]["docker-tag"], - ) - print("Nightly image push finished.") - else: - print( - "Nightly image will not be pushed since will_image_be_published flag is false" - ) - - -def validate_and_extract_general_parameters( - docker_image_type_param: str, pipeline_trigger_type_param: str -) -> Tuple[GithubPipelineTriggerType, DockerImageType]: - try: - trigger_type_param = GithubPipelineTriggerType[pipeline_trigger_type_param] - except KeyError: - raise ValueError( - f"trigger_type parameter is invalid. Valid ones are " - f"{','.join([d.name for d in GithubPipelineTriggerType])}." - ) from KeyError - - image_type_invalid_error_message = f"image_type parameter is invalid. Valid ones are {','.join([d.name for d in regular_images_to_be_built()])}." 
- try: - if docker_image_type_param == "all" or not docker_image_type_param: - docker_image_type = None - else: - docker_image_type = DockerImageType[docker_image_type_param] - except KeyError: - raise ValueError(image_type_invalid_error_message) from KeyError - - return trigger_type_param, docker_image_type - - -def validate_and_extract_manual_exec_params( - manual_trigger_type_param: str, tag_name_param: str -) -> ManualTriggerType: - try: - manual_trigger_type_param = ManualTriggerType[manual_trigger_type_param] - except KeyError: - raise ValueError( - f"manual_trigger_type parameter is invalid. " - f"Valid ones are {','.join([d.name for d in ManualTriggerType])}." - ) from KeyError - - is_tag(tag_name_param) - - return manual_trigger_type_param - - -def get_image_publish_status(github_ref: str, is_test: bool): - if is_test: - return False - triggering_event_info, resource_name = decode_triggering_event_info(github_ref) - if triggering_event_info == GithubTriggerEventSource.tag_push: - if not is_tag_on_branch( - tag_name=resource_name, branch_name=DEFAULT_BRANCH_NAME - ): - return False - return True - current_branch = get_current_branch(os.getcwd()) - if current_branch != DEFAULT_BRANCH_NAME: - return False - return True - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--github_ref", required=True) - parser.add_argument( - "--pipeline_trigger_type", - choices=[e.name for e in GithubPipelineTriggerType], - required=True, - ) - parser.add_argument("--tag_name", nargs="?", default="") - parser.add_argument( - "--manual_trigger_type", choices=[e.name for e in ManualTriggerType] - ) - parser.add_argument("--image_type", choices=[e.name for e in DockerImageType]) - parser.add_argument("--is_test", action="store_true") - args = parser.parse_args() - - pipeline_trigger_type, image_type = validate_and_extract_general_parameters( - args.image_type, args.pipeline_trigger_type - ) - if args.is_test: - print("Script is working in test mode. 
Images will not be published") - - publish_status = get_image_publish_status(args.github_ref, args.is_test) - if pipeline_trigger_type == GithubPipelineTriggerType.workflow_dispatch: - manual_trigger_type = validate_and_extract_manual_exec_params( - args.manual_trigger_type, args.tag_name - ) - publish_docker_image_manually( - manual_trigger_type_param=manual_trigger_type, - will_image_be_published=publish_status, - docker_image_type=image_type, - tag_name=args.tag_name, - ) - elif pipeline_trigger_type == GithubPipelineTriggerType.push: - publish_docker_image_on_push(image_type, args.github_ref, publish_status) - else: - publish_docker_image_on_schedule(image_type, publish_status) +import argparse +import os +from enum import Enum +from typing import Tuple, List + +import docker +import pathlib2 +from parameters_validation import validate_parameters + +from .common_tool_methods import remove_prefix, get_current_branch, is_tag_on_branch +from .common_validations import is_tag + +BASE_PATH = pathlib2.Path(__file__).parents[1] + + +class GithubPipelineTriggerType(Enum): + push = 1 + schedule = 2 + workflow_dispatch = 3 + + +class GithubTriggerEventSource(Enum): + branch_push = 1 + tag_push = 2 + + +class DockerImageType(Enum): + latest = 1 + alpine = 2 + nightly = 3 + postgres_14 = 4 + postgres_15 = 5 + + +class ManualTriggerType(Enum): + main = 1 + tags = 2 + nightly = 3 + + +class ScheduleType(Enum): + regular = 1 + nightly = 2 + + +DEFAULT_BRANCH_NAME = "master" +docker_image_info_dict = { + DockerImageType.latest: { + "file-name": "Dockerfile", + "docker-tag": "latest", + "schedule-type": ScheduleType.regular, + }, + DockerImageType.alpine: { + "file-name": "alpine/Dockerfile", + "docker-tag": "alpine", + "schedule-type": ScheduleType.regular, + }, + DockerImageType.postgres_14: { + "file-name": "postgres-14/Dockerfile", + "docker-tag": "pg14", + "schedule-type": ScheduleType.regular, + }, + DockerImageType.postgres_15: { + "file-name": "postgres-15/Dockerfile", + "docker-tag": "pg15", + "schedule-type": ScheduleType.regular, + }, + DockerImageType.nightly: { + "file-name": "nightly/Dockerfile", + "docker-tag": "nightly", + "schedule-type": ScheduleType.nightly, + }, +} +DOCKER_IMAGE_NAME = "citusdata/citus" + +docker_client = docker.from_env() + +docker_api_client = docker.APIClient() + + +def regular_images_to_be_built( + docker_image_type: DockerImageType = None, +) -> List[DockerImageType]: + if docker_image_type: + return [docker_image_type] + return [ + key + for key, value in docker_image_info_dict.items() + if value["schedule-type"] == ScheduleType.regular + ] + + +# When pipeline triggered, if the event source is +# triggered by branch push or a schedule on pipeline, github_ref format is : refs/heads/{branch_name} +# if tiggered by tag push, github_ref format is: refs/heads/{tag_name} +def decode_triggering_event_info( + github_ref: str, +) -> Tuple[GithubTriggerEventSource, str]: + parts = github_ref.split("/") + if len(parts) != 3 or parts[1] not in ("tags", "heads"): + raise ValueError( + "github ref should be like one of the following two formats: " + "refs/heads/{branch_name}, refs/tags/{tag_name}" + ) + if parts[1] == "tags": + return GithubTriggerEventSource.tag_push, parts[2] + return GithubTriggerEventSource.branch_push, parts[2] + + +@validate_parameters +def decode_tag_parts(tag_name: is_tag(str)) -> List[str]: + return remove_prefix(tag_name, "v").split(".") + + +def get_image_tag(tag_prefix: str, docker_image_type: DockerImageType) -> str: + tag_suffix = ( + "" + if 
docker_image_type == DockerImageType.latest + else f"-{docker_image_info_dict[docker_image_type]['docker-tag']}" + ) + return f"{tag_prefix}{tag_suffix}" + + +def publish_docker_image_on_push( + docker_image_type: DockerImageType, github_ref: str, will_image_be_published: bool +): + triggering_event_info, resource_name = decode_triggering_event_info(github_ref) + for regular_image_type in regular_images_to_be_built(docker_image_type): + if triggering_event_info == GithubTriggerEventSource.branch_push: + publish_main_docker_images(regular_image_type, will_image_be_published) + else: + publish_tagged_docker_images( + regular_image_type, resource_name, will_image_be_published + ) + + +def publish_docker_image_on_schedule( + docker_image_type: DockerImageType, will_image_be_published: bool +): + if docker_image_type == DockerImageType.nightly: + publish_nightly_docker_image(will_image_be_published) + else: + for regular_image_type in regular_images_to_be_built(docker_image_type): + publish_main_docker_images(regular_image_type, will_image_be_published) + + +def publish_docker_image_manually( + manual_trigger_type_param: ManualTriggerType, + will_image_be_published: bool, + docker_image_type: DockerImageType, + tag_name: str = "", +) -> None: + if manual_trigger_type_param == ManualTriggerType.main and not tag_name: + for it in regular_images_to_be_built(docker_image_type): + publish_main_docker_images(it, will_image_be_published) + elif manual_trigger_type_param == ManualTriggerType.tags and tag_name: + for it in regular_images_to_be_built(docker_image_type): + publish_tagged_docker_images(it, tag_name, will_image_be_published) + elif manual_trigger_type_param == ManualTriggerType.nightly: + publish_nightly_docker_image(will_image_be_published) + + +def publish_main_docker_images( + docker_image_type: DockerImageType, will_image_be_published: bool +): + print(f"Building main docker image for {docker_image_type.name}...") + docker_image_name = f"{DOCKER_IMAGE_NAME}:{docker_image_type.name}" + _, logs = docker_client.images.build( + dockerfile=docker_image_info_dict[docker_image_type]["file-name"], + tag=docker_image_name, + path=".", + ) + flush_logs(logs) + print(f"Main docker image for {docker_image_type.name} built.") + if will_image_be_published: + print(f"Publishing main docker image for {docker_image_type.name}...") + docker_client.images.push(DOCKER_IMAGE_NAME, tag=docker_image_type.name) + print(f"Publishing main docker image for {docker_image_type.name} finished") + else: + current_branch = get_current_branch(os.getcwd()) + if current_branch != DEFAULT_BRANCH_NAME: + print( + f"Since current branch {current_branch} is not equal to " + f"{DEFAULT_BRANCH_NAME} {docker_image_name} will not be pushed." + ) + + +def flush_logs(logs): + for log in logs: + log_str = log.get("stream") + if log_str: + print(log_str, end="") + + +def publish_tagged_docker_images( + docker_image_type, tag_name: str, will_image_be_published: bool +): + print( + f"Building and publishing tagged image {docker_image_type.name} for tag {tag_name}..." 
+ ) + tag_parts = decode_tag_parts(tag_name) + tag_version_part = "" + docker_image_name = f"{DOCKER_IMAGE_NAME}:{docker_image_type.name}" + _, logs = docker_client.images.build( + dockerfile=docker_image_info_dict[docker_image_type]["file-name"], + tag=docker_image_name, + path=".", + ) + flush_logs(logs) + print(f"{docker_image_type.name} image built.Now starting tagging and pushing...") + for tag_part in tag_parts: + tag_version_part = tag_version_part + tag_part + image_tag = get_image_tag(tag_version_part, docker_image_type) + print(f"Tagging {docker_image_name} with the tag {image_tag}...") + docker_api_client.tag(docker_image_name, docker_image_name, image_tag) + print(f"Tagging {docker_image_name} with the tag {image_tag} finished.") + if will_image_be_published: + print(f"Pushing {docker_image_name} with the tag {image_tag}...") + push_logs = docker_client.images.push(DOCKER_IMAGE_NAME, tag=image_tag) + print("Push logs:") + print(push_logs) + print(f"Pushing {docker_image_name} with the tag {image_tag} finished") + else: + print( + f"Skipped pushing {docker_image_type} with the tag {image_tag} since will_image_be_published flag is false" + ) + + tag_version_part = tag_version_part + "." + print( + f"Building and publishing tagged image {docker_image_type.name} for tag {tag_name} finished." + ) + + +def publish_nightly_docker_image(will_image_be_published: bool): + print("Building nightly image...") + docker_image_name = f"{DOCKER_IMAGE_NAME}:{docker_image_info_dict[DockerImageType.nightly]['docker-tag']}" + _, logs = docker_client.images.build( + dockerfile=docker_image_info_dict[DockerImageType.nightly]["file-name"], + tag=docker_image_name, + path=".", + ) + flush_logs(logs) + print("Nightly image build finished.") + + if will_image_be_published: + print("Pushing nightly image...") + docker_client.images.push( + DOCKER_IMAGE_NAME, + tag=docker_image_info_dict[DockerImageType.nightly]["docker-tag"], + ) + print("Nightly image push finished.") + else: + print( + "Nightly image will not be pushed since will_image_be_published flag is false" + ) + + +def validate_and_extract_general_parameters( + docker_image_type_param: str, pipeline_trigger_type_param: str +) -> Tuple[GithubPipelineTriggerType, DockerImageType]: + try: + trigger_type_param = GithubPipelineTriggerType[pipeline_trigger_type_param] + except KeyError: + raise ValueError( + f"trigger_type parameter is invalid. Valid ones are " + f"{','.join([d.name for d in GithubPipelineTriggerType])}." + ) from KeyError + + image_type_invalid_error_message = f"image_type parameter is invalid. Valid ones are {','.join([d.name for d in regular_images_to_be_built()])}." + try: + if docker_image_type_param == "all" or not docker_image_type_param: + docker_image_type = None + else: + docker_image_type = DockerImageType[docker_image_type_param] + except KeyError: + raise ValueError(image_type_invalid_error_message) from KeyError + + return trigger_type_param, docker_image_type + + +def validate_and_extract_manual_exec_params( + manual_trigger_type_param: str, tag_name_param: str +) -> ManualTriggerType: + try: + manual_trigger_type_param = ManualTriggerType[manual_trigger_type_param] + except KeyError: + raise ValueError( + f"manual_trigger_type parameter is invalid. " + f"Valid ones are {','.join([d.name for d in ManualTriggerType])}." 
+ ) from KeyError + + is_tag(tag_name_param) + + return manual_trigger_type_param + + +def get_image_publish_status(github_ref: str, is_test: bool): + if is_test: + return False + triggering_event_info, resource_name = decode_triggering_event_info(github_ref) + if triggering_event_info == GithubTriggerEventSource.tag_push: + if not is_tag_on_branch( + tag_name=resource_name, branch_name=DEFAULT_BRANCH_NAME + ): + return False + return True + current_branch = get_current_branch(os.getcwd()) + if current_branch != DEFAULT_BRANCH_NAME: + return False + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--github_ref", required=True) + parser.add_argument( + "--pipeline_trigger_type", + choices=[e.name for e in GithubPipelineTriggerType], + required=True, + ) + parser.add_argument("--tag_name", nargs="?", default="") + parser.add_argument( + "--manual_trigger_type", choices=[e.name for e in ManualTriggerType] + ) + parser.add_argument("--image_type", choices=[e.name for e in DockerImageType]) + parser.add_argument("--is_test", action="store_true") + args = parser.parse_args() + + pipeline_trigger_type, image_type = validate_and_extract_general_parameters( + args.image_type, args.pipeline_trigger_type + ) + if args.is_test: + print("Script is working in test mode. Images will not be published") + + publish_status = get_image_publish_status(args.github_ref, args.is_test) + if pipeline_trigger_type == GithubPipelineTriggerType.workflow_dispatch: + manual_trigger_type = validate_and_extract_manual_exec_params( + args.manual_trigger_type, args.tag_name + ) + publish_docker_image_manually( + manual_trigger_type_param=manual_trigger_type, + will_image_be_published=publish_status, + docker_image_type=image_type, + tag_name=args.tag_name, + ) + elif pipeline_trigger_type == GithubPipelineTriggerType.push: + publish_docker_image_on_push(image_type, args.github_ref, publish_status) + else: + publish_docker_image_on_schedule(image_type, publish_status) diff --git a/packaging_automation/pypi_stats_collector.py b/packaging_automation/pypi_stats_collector.py index 2e7156f1..42849dd2 100644 --- a/packaging_automation/pypi_stats_collector.py +++ b/packaging_automation/pypi_stats_collector.py @@ -1,83 +1,83 @@ -from sqlalchemy import Column, Integer, String, Date -from datetime import date, datetime -import pypistats -import json -from .dbconfig import Base, DbParams, db_session -import os - - -# Define the database connection -db_name = os.getenv( - "DB_NAME", -) -db_user = os.getenv("DB_USER_NAME") -db_password = os.getenv("DB_PASSWORD") -db_host_and_port = os.getenv("DB_HOST_AND_PORT") - -db_params = DbParams( - user_name=db_user, - password=db_password, - host_and_port=db_host_and_port, - db_name=db_name, -) - - -# Define the model for the download numbers -class DownloadNumbers(Base): - __tablename__ = "pypi_downloads" - - id = Column(Integer, primary_key=True) - fetch_date = Column(Date) - library_name = Column(String) - download_count = Column(String) - download_date = Column(Date) - - -packages = ["django-multitenant"] - - -def fetch_download_numbers(package_name): - print( - f"Fetching download numbers for {package_name} from pypi.org. Started at {datetime.now()}" - ) - download_numbers = json.loads( - pypistats.overall(package_name, format="json", mirrors=True, total=True) - ) - session = db_session(db_params=db_params, is_test=False, create_db_objects=True) - print( - f"{len(download_numbers['data'])} records fetched from pypi.org. 
Starting to add to database. Started at {datetime.now()}" - ) - - new_record_count = 0 - existing_record_count = 0 - for downloads in download_numbers["data"]: - existing_record = ( - session.query(DownloadNumbers) - .filter_by( - library_name=package_name, - download_count=downloads["downloads"], - download_date=downloads["date"], - ) - .first() - ) - if not existing_record: - new_record_count += 1 - print(f"Adding {package_name} {downloads['downloads']} {downloads['date']}") - record = DownloadNumbers( - fetch_date=date.today(), - library_name=package_name, - download_count=downloads["downloads"], - download_date=downloads["date"], - ) - session.add(record) - else: - existing_record_count += 1 - - session.commit() - print( - f"Process finished. New records: {new_record_count} Existing records: {existing_record_count}. Finished at {datetime.now()}" - ) - - -for package_name in packages: - fetch_download_numbers(package_name) +from sqlalchemy import Column, Integer, String, Date +from datetime import date, datetime +import pypistats +import json +from .dbconfig import Base, DbParams, db_session +import os + + +# Define the database connection +db_name = os.getenv( + "DB_NAME", +) +db_user = os.getenv("DB_USER_NAME") +db_password = os.getenv("DB_PASSWORD") +db_host_and_port = os.getenv("DB_HOST_AND_PORT") + +db_params = DbParams( + user_name=db_user, + password=db_password, + host_and_port=db_host_and_port, + db_name=db_name, +) + + +# Define the model for the download numbers +class DownloadNumbers(Base): + __tablename__ = "pypi_downloads" + + id = Column(Integer, primary_key=True) + fetch_date = Column(Date) + library_name = Column(String) + download_count = Column(String) + download_date = Column(Date) + + +packages = ["django-multitenant"] + + +def fetch_download_numbers(package_name): + print( + f"Fetching download numbers for {package_name} from pypi.org. Started at {datetime.now()}" + ) + download_numbers = json.loads( + pypistats.overall(package_name, format="json", mirrors=True, total=True) + ) + session = db_session(db_params=db_params, is_test=False, create_db_objects=True) + print( + f"{len(download_numbers['data'])} records fetched from pypi.org. Starting to add to database. Started at {datetime.now()}" + ) + + new_record_count = 0 + existing_record_count = 0 + for downloads in download_numbers["data"]: + existing_record = ( + session.query(DownloadNumbers) + .filter_by( + library_name=package_name, + download_count=downloads["downloads"], + download_date=downloads["date"], + ) + .first() + ) + if not existing_record: + new_record_count += 1 + print(f"Adding {package_name} {downloads['downloads']} {downloads['date']}") + record = DownloadNumbers( + fetch_date=date.today(), + library_name=package_name, + download_count=downloads["downloads"], + download_date=downloads["date"], + ) + session.add(record) + else: + existing_record_count += 1 + + session.commit() + print( + f"Process finished. New records: {new_record_count} Existing records: {existing_record_count}. 
Finished at {datetime.now()}" + ) + + +for package_name in packages: + fetch_download_numbers(package_name) diff --git a/packaging_automation/requirements.in b/packaging_automation/requirements.in index a910332e..3f45a31f 100644 --- a/packaging_automation/requirements.in +++ b/packaging_automation/requirements.in @@ -1,22 +1,22 @@ -attrs -black -docker -GitPython -Jinja2 -parameters_validation -pathlib2 -psycopg2-binary -pycurl -PyGithub -pytest -python-gnupg -python-string-utils -PyYAML -requests -SQLAlchemy -urllib3 -wheel -python-dotenv -prospector[with_everything] -pypistats -setuptools==67.6.1 +attrs +black +docker +GitPython +Jinja2 +parameters_validation +pathlib2 +psycopg2-binary +pycurl +PyGithub +pytest +python-gnupg +python-string-utils +PyYAML +requests +SQLAlchemy +urllib3 +wheel +python-dotenv +prospector[with_everything] +pypistats +setuptools==67.6.1 diff --git a/packaging_automation/templates/citus-enterprise-pkgvars.tmpl b/packaging_automation/templates/citus-enterprise-pkgvars.tmpl index 90d306c3..56c7cca7 100644 --- a/packaging_automation/templates/citus-enterprise-pkgvars.tmpl +++ b/packaging_automation/templates/citus-enterprise-pkgvars.tmpl @@ -1,5 +1,5 @@ -pkgname=citus-enterprise -pkgdesc='Citus Enterprise' -pkglatest={{version}} -nightlyref=enterprise-master -versioning=fancy +pkgname=citus-enterprise +pkgdesc='Citus Enterprise' +pkglatest={{version}} +nightlyref=enterprise-master +versioning=fancy diff --git a/packaging_automation/templates/citus-enterprise.spec.tmpl b/packaging_automation/templates/citus-enterprise.spec.tmpl index 152d76df..c65b4b1c 100644 --- a/packaging_automation/templates/citus-enterprise.spec.tmpl +++ b/packaging_automation/templates/citus-enterprise.spec.tmpl @@ -1,352 +1,352 @@ -%global pgmajorversion 11 -%global pgpackageversion 11 -%global pginstdir /usr/pgsql-%{pgpackageversion} -%global sname citus-enterprise -%global pname citus -%global debug_package %{nil} -%global unencrypted_package "%{getenv:UNENCRYPTED_PACKAGE}" - -Summary: PostgreSQL-based distributed RDBMS -Name: %{sname}%{?pkginfix}_%{pgmajorversion} -Provides: citus_%{pgmajorversion} -Conflicts: citus_%{pgmajorversion} -Version: {{rpm_version}} -Release: 1%{dist} -License: Commercial -Group: Applications/Databases -Source0: https://github.com/citusdata/citus-enterprise/archive/v{{version}}.tar.gz -URL: https://github.com/citusdata/citus-enterprise -BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel -Requires: postgresql%{pgmajorversion}-server -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%description -Citus horizontally scales PostgreSQL across commodity servers -using sharding and replication. Its query engine parallelizes -incoming SQL queries across these servers to enable real-time -responses on large datasets. - -Citus extends the underlying database rather than forking it, -which gives developers and enterprises the power and familiarity -of a traditional relational database. As an extension, Citus -supports new PostgreSQL releases, allowing users to benefit from -new features while maintaining compatibility with existing -PostgreSQL tools. Note that Citus supports many (but not all) SQL -commands. 
- -%prep -%setup -q -n %{sname}-%{version} - -%build - -currentgccver="$(gcc -dumpversion)" -requiredgccver="4.8.2" -if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then - echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags - exit 1 -fi - -%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) -make %{?_smp_mflags} - -%install -%make_install -# Install documentation with a better name: -%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension -%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md -# Set paths to be packaged other than LICENSE, README & CHANGELOG.md -echo %{pginstdir}/include/server/citus_*.h >> installation_files.list -echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list -echo %{pginstdir}/share/extension/citus-*.sql >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list -%if %{unencrypted_package} != "" - echo %{pginstdir}/lib/citus.so >> installation_files.list - [[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list - echo %{pginstdir}/share/extension/citus.control >> installation_files.list - %ifarch ppc64 ppc64le - %else - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - echo %{pginstdir}/lib/bitcode/%{pname}*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{pname}/*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{pname}/*/*.bc >> installation_files.list - - # Columnar does not exist in Citus versions < 10.0 - # At this point, we don't have %{pginstdir}, - # so first check build directory for columnar. - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list - %endif - %endif -%else - echo /usr/bin/citus-enterprise-pg-%{pgmajorversion}-setup >> installation_files.list - echo %{pginstdir}/lib/citus_secret_files.metadata >> installation_files.list - echo %{pginstdir}/lib/citus.so.gpg >> installation_files.list - echo %{pginstdir}/share/extension/citus.control.gpg >> installation_files.list - %ifarch ppc64 ppc64le - %else - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - echo %{pginstdir}/lib/bitcode/%{pname}*.bc.gpg >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{pname}/*.bc.gpg >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{pname}/*/*.bc.gpg >> installation_files.list - - # Columnar does not exist in Citus versions < 10.0 - # At this point, we don't have %{pginstdir}, - # so first check build directory for columnar. 
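The %build scriptlet above enforces a minimum GCC version by sorting the required and detected version strings with sort -V and checking which one sorts first. As a standalone illustration of that idiom (not part of the spec itself; the 4.8.2 value simply mirrors the spec), a minimal sketch looks like this:

#!/bin/bash
# Sketch of the "sort -V" minimum-version guard used in the %build section.
# The minimum value mirrors the spec; everything else is illustrative.
min_required="4.8.2"
current="$(gcc -dumpversion)"

# sort -V orders version strings numerically, so if the smallest of the two
# is not the required minimum, the detected compiler is too old.
if [ "$(printf '%s\n' "$min_required" "$current" | sort -V | head -n1)" != "$min_required" ]; then
    echo "ERROR: gcc >= $min_required is required" >&2
    exit 1
fi
echo "gcc $current satisfies the >= $min_required requirement"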
- [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc.gpg >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc.gpg >> installation_files.list - %endif - %endif -%endif -%if %{unencrypted_package} == "" - -set -eu -set +x - -dir="%{buildroot}" -libdir="$dir/%{pginstdir}/lib" -mkdir -p "$libdir" - -# List all files to be encrypted and store it in the libdir as secret_files_list -secret_files_list="$libdir/citus_secret_files.metadata" -find "$dir" -iname "*.so" -o -iname "*.bc" -o -iname "*.control" | sed -e "s@^$dir@@g" > "$secret_files_list" - -PACKAGE_ENCRYPTION_KEY="${PACKAGE_ENCRYPTION_KEY:-}" -if [ -z "$PACKAGE_ENCRYPTION_KEY" ]; then - echo "ERROR: The PACKAGE_ENCRYPTION_KEY environment variable needs to be set" - echo "HINT: If trying to build packages locally, just set it to 'abc' or something" - echo "HINT: If you're trying to build unencrypted packages you should set the UNENCRYPTED_PACKAGE environment variable" - exit 1 -fi - -# create a temporary directory for gpg to use so it doesn't output warnings -temp_gnupghome="$(mktemp -d)" -while read -r unencrypted_file; do - path_unencrypted="$dir$unencrypted_file" - path_encrypted="$path_unencrypted.gpg" - - # encrypt the files using password - # --s2k-* options are there to make sure decrypting/encrypting doesn't - # take minutes - gpg --symmetric \ - --batch \ - --no-tty \ - --yes \ - --cipher-algo AES256 \ - --s2k-mode 3 \ - --s2k-count 1000000 \ - --s2k-digest-algo SHA512 \ - --passphrase-fd 0 \ - --homedir "$temp_gnupghome" \ - --output "$path_encrypted" \ - "$path_unencrypted" \ - <<< "$PACKAGE_ENCRYPTION_KEY" - - # keep permissions and ownership the same, so we can restore it later - # when decrypting - chmod --reference "$path_unencrypted" "$path_encrypted" - chown --reference "$path_unencrypted" "$path_encrypted" - - # remove the unencrypted file from the package - rm "$path_unencrypted" -done < "$secret_files_list" - -# remove the temporary gpg directory -rm -rf "$temp_gnupghome" - - -bindir="$dir/usr/bin" -mkdir -p "$bindir" - - -#------- START OF DECRYPT SCRIPT -------- -# Create file used to decrypt -cat > "$bindir/citus-enterprise-pg-%{pgmajorversion}-setup" << EOF -#!/bin/sh - -set -eu - -pg_version=%{pgmajorversion} -libdir="%{pginstdir}/lib" -secret_files_list="\$libdir/citus_secret_files.metadata" - -# Make sure the script is being run as root -if [ "\$(id -u)" -ne "0" ]; then - echo "ERROR: citus-enterprise-pg-\$pg_version-setup needs to be run as root" - echo "HINT: try running \"sudo citus-enterprise-pg-\$pg_version-setup\" instead" - exit 1 -fi - - -echo " -Your use of this software is subject to the terms and conditions of the license -agreement by which you acquired this software. If you are a volume license -customer, use of this software is subject to your volume license agreement. -You may not use this software if you have not validly acquired a license for -the software from Microsoft or its licensed distributors. - -BY USING THE SOFTWARE, YOU ACCEPT THIS AGREEMENT. -" - -CITUS_ACCEPT_LICENSE="\${CITUS_ACCEPT_LICENSE:-}" - -interactive_license=false -while [ -z "\$CITUS_ACCEPT_LICENSE" ]; do - interactive_license=true - echo "Do you accept these terms? 
YES/NO" - read -r CITUS_ACCEPT_LICENSE -done - -case "\$CITUS_ACCEPT_LICENSE" in - YES );; - y|Y|Yes|yes ) - echo "ERROR: Only YES is accepted (all capital letters)" - exit 1; - ;; - * ) - echo "ERROR: Terms of the software must be accepted" - exit 1 -esac - -if [ \$interactive_license = false ]; then - echo "Accepted terms by using CITUS_ACCEPT_LICENSE=YES environment variable" -fi - -encryption_disclaimer_text=" -Since Citus is a distributed database, data is sent over the network between -nodes. It is YOUR RESPONSIBILITY as an operator to ensure that this traffic is -secure. - -Since Citus version 8.1.0 (released 2018-12-17) the traffic between the -different nodes in the cluster is encrypted for NEW installations. This is done -by using TLS with self-signed certificates. This means that this does NOT -protect against Man-In-The-Middle attacks. This only protects against passive -eavesdropping on the network. - -This automatic TLS setup of self-signed certificates and TLS is NOT DONE in the -following cases: -1. The Citus clusters was originally created with a Citus version before 8.1.0. - Even when the cluster is later upgraded to version 8.1.0 or higher. This is - to make sure partially upgraded clusters continue to work. -2. The ssl Postgres configuration option is already set to 'on'. This indicates - that the operator has set up their own certificates. - -In these cases it is assumed the operator has set up appropriate security -themselves. - -So, with the default settings Citus clusters are not safe from -Man-In-The-Middle attacks. To secure the traffic completely you need to follow -the practices outlined here: -https://docs.citusdata.com/en/stable/admin_guide/cluster_management.html#connection-management - -Please confirm that you have read this and understand that you should set up -TLS yourself to send traffic between nodes securely: -YES/NO?" - -CITUS_ACCEPT_ENCRYPTION_DISCLAIMER="\${CITUS_ACCEPT_ENCRYPTION_DISCLAIMER:-}" -while [ -z "\$CITUS_ACCEPT_ENCRYPTION_DISCLAIMER" ]; do - echo "\$encryption_disclaimer_text" - read -r CITUS_ACCEPT_ENCRYPTION_DISCLAIMER -done - -case "\$CITUS_ACCEPT_ENCRYPTION_DISCLAIMER" in - YES );; - y|Y|Yes|yes ) - echo "ERROR: Only YES is accepted (all capital letters)" - exit 1; - ;; - * ) - echo "ERROR: Disclaimer about encrypted traffic must be accepted before installing" - exit 1 -esac - -# create a temporary directory for gpg to use so it doesn't output warnings -temp_gnupghome="\$(mktemp -d)" -CITUS_LICENSE_KEY="\${CITUS_LICENSE_KEY:-}" -while [ -z "\$CITUS_LICENSE_KEY" ]; do - echo '' - echo 'Please enter license key:' - read -r CITUS_LICENSE_KEY -done - -# Try to decrypt the first file in the list to check if the key is correct -if ! gpg --output "/dev/null" \ - --batch --no-tty --yes --quiet \ - --passphrase "\$CITUS_LICENSE_KEY" \ - --homedir "\$temp_gnupghome" \ - --decrypt "\$(head -n 1 "\$secret_files_list").gpg" 2> /dev/null; then - echo "ERROR: Invalid license key supplied" - exit 1 -fi - -echo "License key is valid" -echo "Installing..." 
- -# Decrypt all the encrypted files -while read -r path_unencrypted; do - path_encrypted="\$path_unencrypted.gpg" - # decrypt the encrypted file - gpg --output "\$path_unencrypted" \ - --batch --no-tty --yes --quiet \ - --passphrase "\$CITUS_LICENSE_KEY" \ - --homedir "\$temp_gnupghome" \ - --decrypt "\$path_encrypted" - - # restore permissions and ownership - chmod --reference "\$path_encrypted" "\$path_unencrypted" - chown --reference "\$path_encrypted" "\$path_unencrypted" -done < "\$secret_files_list" - -# remove the temporary gpg directory -rm -rf "\$temp_gnupghome" -EOF - -#------- END OF DECRYPT SCRIPT -------- - -chmod +x "$bindir/citus-enterprise-pg-%{pgmajorversion}-setup" - -cat "$bindir/citus-enterprise-pg-%{pgmajorversion}-setup" - - - -%post -installation_message=" -+--------------------------------------------------------------+ -Please run 'sudo citus-enterprise-pg-%{pgmajorversion}-setup' -to complete the setup of Citus Enterprise -+--------------------------------------------------------------+ -" -echo "$installation_message" - - -%preun -libdir="%{pginstdir}/lib" - -secret_files_list="$libdir/citus_secret_files.metadata" - -# Cleanup all de decrypted files since these are not managed by the package -# manager and would be left around otherwise -while read -r path_unencrypted; do - rm -f "$path_unencrypted" -done < "$secret_files_list" - -%endif # encrypted packages code - -%clean -%{__rm} -rf %{buildroot} - -%files -f installation_files.list -%files -%defattr(-,root,root,-) -%doc CHANGELOG.md -%if 0%{?rhel} && 0%{?rhel} <= 6 -%doc LICENSE -%else -%license LICENSE -%endif -%doc %{pginstdir}/doc/extension/README-%{sname}.md - -%changelog -{{changelog}} +%global pgmajorversion 11 +%global pgpackageversion 11 +%global pginstdir /usr/pgsql-%{pgpackageversion} +%global sname citus-enterprise +%global pname citus +%global debug_package %{nil} +%global unencrypted_package "%{getenv:UNENCRYPTED_PACKAGE}" + +Summary: PostgreSQL-based distributed RDBMS +Name: %{sname}%{?pkginfix}_%{pgmajorversion} +Provides: citus_%{pgmajorversion} +Conflicts: citus_%{pgmajorversion} +Version: {{rpm_version}} +Release: 1%{dist} +License: Commercial +Group: Applications/Databases +Source0: https://github.com/citusdata/citus-enterprise/archive/v{{version}}.tar.gz +URL: https://github.com/citusdata/citus-enterprise +BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel +Requires: postgresql%{pgmajorversion}-server +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%description +Citus horizontally scales PostgreSQL across commodity servers +using sharding and replication. Its query engine parallelizes +incoming SQL queries across these servers to enable real-time +responses on large datasets. + +Citus extends the underlying database rather than forking it, +which gives developers and enterprises the power and familiarity +of a traditional relational database. As an extension, Citus +supports new PostgreSQL releases, allowing users to benefit from +new features while maintaining compatibility with existing +PostgreSQL tools. Note that Citus supports many (but not all) SQL +commands. 
+ +%prep +%setup -q -n %{sname}-%{version} + +%build + +currentgccver="$(gcc -dumpversion)" +requiredgccver="4.8.2" +if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then + echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags + exit 1 +fi + +%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) +make %{?_smp_mflags} + +%install +%make_install +# Install documentation with a better name: +%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension +%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md +# Set paths to be packaged other than LICENSE, README & CHANGELOG.md +echo %{pginstdir}/include/server/citus_*.h >> installation_files.list +echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list +echo %{pginstdir}/share/extension/citus-*.sql >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list +%if %{unencrypted_package} != "" + echo %{pginstdir}/lib/citus.so >> installation_files.list + [[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list + echo %{pginstdir}/share/extension/citus.control >> installation_files.list + %ifarch ppc64 ppc64le + %else + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + echo %{pginstdir}/lib/bitcode/%{pname}*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{pname}/*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{pname}/*/*.bc >> installation_files.list + + # Columnar does not exist in Citus versions < 10.0 + # At this point, we don't have %{pginstdir}, + # so first check build directory for columnar. + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list + %endif + %endif +%else + echo /usr/bin/citus-enterprise-pg-%{pgmajorversion}-setup >> installation_files.list + echo %{pginstdir}/lib/citus_secret_files.metadata >> installation_files.list + echo %{pginstdir}/lib/citus.so.gpg >> installation_files.list + echo %{pginstdir}/share/extension/citus.control.gpg >> installation_files.list + %ifarch ppc64 ppc64le + %else + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + echo %{pginstdir}/lib/bitcode/%{pname}*.bc.gpg >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{pname}/*.bc.gpg >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{pname}/*/*.bc.gpg >> installation_files.list + + # Columnar does not exist in Citus versions < 10.0 + # At this point, we don't have %{pginstdir}, + # so first check build directory for columnar. 
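Rather than hard-coding every path under %files, this spec streams the paths it wants packaged into installation_files.list and hands that file to "%files -f"; optional artifacts are appended only when they exist in the buildroot, while the recorded path is the final installed location. A condensed sketch of that pattern outside rpmbuild (buildroot and pginstdir are placeholder values):

#!/bin/bash
# Sketch of the "%files -f installation_files.list" manifest pattern.
# buildroot and pginstdir are placeholders for illustration only.
buildroot="/tmp/buildroot"
pginstdir="/usr/pgsql-11"

# Unconditional entry: a glob that every supported Citus version produces.
echo "$pginstdir/share/extension/citus-*.sql" >> installation_files.list

# Optional entry: list citus_columnar.so only if this version built it;
# the check runs against the buildroot copy, the list records the final path.
if [[ -f "$buildroot$pginstdir/lib/citus_columnar.so" ]]; then
    echo "$pginstdir/lib/citus_columnar.so" >> installation_files.list
fi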
+ [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc.gpg >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc.gpg >> installation_files.list + %endif + %endif +%endif +%if %{unencrypted_package} == "" + +set -eu +set +x + +dir="%{buildroot}" +libdir="$dir/%{pginstdir}/lib" +mkdir -p "$libdir" + +# List all files to be encrypted and store it in the libdir as secret_files_list +secret_files_list="$libdir/citus_secret_files.metadata" +find "$dir" -iname "*.so" -o -iname "*.bc" -o -iname "*.control" | sed -e "s@^$dir@@g" > "$secret_files_list" + +PACKAGE_ENCRYPTION_KEY="${PACKAGE_ENCRYPTION_KEY:-}" +if [ -z "$PACKAGE_ENCRYPTION_KEY" ]; then + echo "ERROR: The PACKAGE_ENCRYPTION_KEY environment variable needs to be set" + echo "HINT: If trying to build packages locally, just set it to 'abc' or something" + echo "HINT: If you're trying to build unencrypted packages you should set the UNENCRYPTED_PACKAGE environment variable" + exit 1 +fi + +# create a temporary directory for gpg to use so it doesn't output warnings +temp_gnupghome="$(mktemp -d)" +while read -r unencrypted_file; do + path_unencrypted="$dir$unencrypted_file" + path_encrypted="$path_unencrypted.gpg" + + # encrypt the files using password + # --s2k-* options are there to make sure decrypting/encrypting doesn't + # take minutes + gpg --symmetric \ + --batch \ + --no-tty \ + --yes \ + --cipher-algo AES256 \ + --s2k-mode 3 \ + --s2k-count 1000000 \ + --s2k-digest-algo SHA512 \ + --passphrase-fd 0 \ + --homedir "$temp_gnupghome" \ + --output "$path_encrypted" \ + "$path_unencrypted" \ + <<< "$PACKAGE_ENCRYPTION_KEY" + + # keep permissions and ownership the same, so we can restore it later + # when decrypting + chmod --reference "$path_unencrypted" "$path_encrypted" + chown --reference "$path_unencrypted" "$path_encrypted" + + # remove the unencrypted file from the package + rm "$path_unencrypted" +done < "$secret_files_list" + +# remove the temporary gpg directory +rm -rf "$temp_gnupghome" + + +bindir="$dir/usr/bin" +mkdir -p "$bindir" + + +#------- START OF DECRYPT SCRIPT -------- +# Create file used to decrypt +cat > "$bindir/citus-enterprise-pg-%{pgmajorversion}-setup" << EOF +#!/bin/sh + +set -eu + +pg_version=%{pgmajorversion} +libdir="%{pginstdir}/lib" +secret_files_list="\$libdir/citus_secret_files.metadata" + +# Make sure the script is being run as root +if [ "\$(id -u)" -ne "0" ]; then + echo "ERROR: citus-enterprise-pg-\$pg_version-setup needs to be run as root" + echo "HINT: try running \"sudo citus-enterprise-pg-\$pg_version-setup\" instead" + exit 1 +fi + + +echo " +Your use of this software is subject to the terms and conditions of the license +agreement by which you acquired this software. If you are a volume license +customer, use of this software is subject to your volume license agreement. +You may not use this software if you have not validly acquired a license for +the software from Microsoft or its licensed distributors. + +BY USING THE SOFTWARE, YOU ACCEPT THIS AGREEMENT. +" + +CITUS_ACCEPT_LICENSE="\${CITUS_ACCEPT_LICENSE:-}" + +interactive_license=false +while [ -z "\$CITUS_ACCEPT_LICENSE" ]; do + interactive_license=true + echo "Do you accept these terms? 
YES/NO" + read -r CITUS_ACCEPT_LICENSE +done + +case "\$CITUS_ACCEPT_LICENSE" in + YES );; + y|Y|Yes|yes ) + echo "ERROR: Only YES is accepted (all capital letters)" + exit 1; + ;; + * ) + echo "ERROR: Terms of the software must be accepted" + exit 1 +esac + +if [ \$interactive_license = false ]; then + echo "Accepted terms by using CITUS_ACCEPT_LICENSE=YES environment variable" +fi + +encryption_disclaimer_text=" +Since Citus is a distributed database, data is sent over the network between +nodes. It is YOUR RESPONSIBILITY as an operator to ensure that this traffic is +secure. + +Since Citus version 8.1.0 (released 2018-12-17) the traffic between the +different nodes in the cluster is encrypted for NEW installations. This is done +by using TLS with self-signed certificates. This means that this does NOT +protect against Man-In-The-Middle attacks. This only protects against passive +eavesdropping on the network. + +This automatic TLS setup of self-signed certificates and TLS is NOT DONE in the +following cases: +1. The Citus clusters was originally created with a Citus version before 8.1.0. + Even when the cluster is later upgraded to version 8.1.0 or higher. This is + to make sure partially upgraded clusters continue to work. +2. The ssl Postgres configuration option is already set to 'on'. This indicates + that the operator has set up their own certificates. + +In these cases it is assumed the operator has set up appropriate security +themselves. + +So, with the default settings Citus clusters are not safe from +Man-In-The-Middle attacks. To secure the traffic completely you need to follow +the practices outlined here: +https://docs.citusdata.com/en/stable/admin_guide/cluster_management.html#connection-management + +Please confirm that you have read this and understand that you should set up +TLS yourself to send traffic between nodes securely: +YES/NO?" + +CITUS_ACCEPT_ENCRYPTION_DISCLAIMER="\${CITUS_ACCEPT_ENCRYPTION_DISCLAIMER:-}" +while [ -z "\$CITUS_ACCEPT_ENCRYPTION_DISCLAIMER" ]; do + echo "\$encryption_disclaimer_text" + read -r CITUS_ACCEPT_ENCRYPTION_DISCLAIMER +done + +case "\$CITUS_ACCEPT_ENCRYPTION_DISCLAIMER" in + YES );; + y|Y|Yes|yes ) + echo "ERROR: Only YES is accepted (all capital letters)" + exit 1; + ;; + * ) + echo "ERROR: Disclaimer about encrypted traffic must be accepted before installing" + exit 1 +esac + +# create a temporary directory for gpg to use so it doesn't output warnings +temp_gnupghome="\$(mktemp -d)" +CITUS_LICENSE_KEY="\${CITUS_LICENSE_KEY:-}" +while [ -z "\$CITUS_LICENSE_KEY" ]; do + echo '' + echo 'Please enter license key:' + read -r CITUS_LICENSE_KEY +done + +# Try to decrypt the first file in the list to check if the key is correct +if ! gpg --output "/dev/null" \ + --batch --no-tty --yes --quiet \ + --passphrase "\$CITUS_LICENSE_KEY" \ + --homedir "\$temp_gnupghome" \ + --decrypt "\$(head -n 1 "\$secret_files_list").gpg" 2> /dev/null; then + echo "ERROR: Invalid license key supplied" + exit 1 +fi + +echo "License key is valid" +echo "Installing..." 
+ +# Decrypt all the encrypted files +while read -r path_unencrypted; do + path_encrypted="\$path_unencrypted.gpg" + # decrypt the encrypted file + gpg --output "\$path_unencrypted" \ + --batch --no-tty --yes --quiet \ + --passphrase "\$CITUS_LICENSE_KEY" \ + --homedir "\$temp_gnupghome" \ + --decrypt "\$path_encrypted" + + # restore permissions and ownership + chmod --reference "\$path_encrypted" "\$path_unencrypted" + chown --reference "\$path_encrypted" "\$path_unencrypted" +done < "\$secret_files_list" + +# remove the temporary gpg directory +rm -rf "\$temp_gnupghome" +EOF + +#------- END OF DECRYPT SCRIPT -------- + +chmod +x "$bindir/citus-enterprise-pg-%{pgmajorversion}-setup" + +cat "$bindir/citus-enterprise-pg-%{pgmajorversion}-setup" + + + +%post +installation_message=" ++--------------------------------------------------------------+ +Please run 'sudo citus-enterprise-pg-%{pgmajorversion}-setup' +to complete the setup of Citus Enterprise ++--------------------------------------------------------------+ +" +echo "$installation_message" + + +%preun +libdir="%{pginstdir}/lib" + +secret_files_list="$libdir/citus_secret_files.metadata" + +# Cleanup all de decrypted files since these are not managed by the package +# manager and would be left around otherwise +while read -r path_unencrypted; do + rm -f "$path_unencrypted" +done < "$secret_files_list" + +%endif # encrypted packages code + +%clean +%{__rm} -rf %{buildroot} + +%files -f installation_files.list +%files +%defattr(-,root,root,-) +%doc CHANGELOG.md +%if 0%{?rhel} && 0%{?rhel} <= 6 +%doc LICENSE +%else +%license LICENSE +%endif +%doc %{pginstdir}/doc/extension/README-%{sname}.md + +%changelog +{{changelog}} diff --git a/packaging_automation/templates/citus-pkgvars.tmpl b/packaging_automation/templates/citus-pkgvars.tmpl index 2f768b9d..c1de7e80 100644 --- a/packaging_automation/templates/citus-pkgvars.tmpl +++ b/packaging_automation/templates/citus-pkgvars.tmpl @@ -1,5 +1,5 @@ -pkgname=citus -pkgdesc='Citus (Open-Source)' -pkglatest={{version}} -nightlyref=main -versioning=fancy +pkgname=citus +pkgdesc='Citus (Open-Source)' +pkglatest={{version}} +nightlyref=main +versioning=fancy diff --git a/packaging_automation/templates/citus.spec.tmpl b/packaging_automation/templates/citus.spec.tmpl index 27107086..3cba6d7f 100644 --- a/packaging_automation/templates/citus.spec.tmpl +++ b/packaging_automation/templates/citus.spec.tmpl @@ -1,115 +1,115 @@ -%global pgmajorversion 11 -%global pgpackageversion 11 -%global pginstdir /usr/pgsql-%{pgpackageversion} -%global sname citus -%global debug_package %{nil} - -Summary: PostgreSQL-based distributed RDBMS -Name: %{sname}%{?pkginfix}_%{pgmajorversion} -Provides: %{sname}_%{pgmajorversion} -Conflicts: %{sname}_%{pgmajorversion} -Version: {{rpm_version}} -Release: {{fancy_version_no}}%{dist} -License: AGPLv3 -Group: Applications/Databases -Source0: https://github.com/citusdata/citus/archive/v{{version}}.tar.gz -URL: https://github.com/citusdata/citus -BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel -Requires: postgresql%{pgmajorversion}-server -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%description -Citus horizontally scales PostgreSQL across commodity servers -using sharding and replication. Its query engine parallelizes -incoming SQL queries across these servers to enable real-time -responses on large datasets. 
- -Citus extends the underlying database rather than forking it, -which gives developers and enterprises the power and familiarity -of a traditional relational database. As an extension, Citus -supports new PostgreSQL releases, allowing users to benefit from -new features while maintaining compatibility with existing -PostgreSQL tools. Note that Citus supports many (but not all) SQL -commands. - -%prep -%setup -q -n %{sname}-%{version} - -%build - -currentgccver="$(gcc -dumpversion)" -requiredgccver="4.8.2" -if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then - echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags - exit 1 -fi - -%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) -make %{?_smp_mflags} - -%install -%make_install -# Install documentation with a better name: -%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension -%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md -%{__cp} NOTICE %{buildroot}%{pginstdir}/doc/extension/NOTICE-%{sname} -# Set paths to be packaged other than LICENSE, README & CHANGELOG.md -echo %{pginstdir}/include/server/citus_*.h >> installation_files.list -echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list -echo %{pginstdir}/lib/%{sname}.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/pgoutput.so ]] && echo %{pginstdir}/lib/citus_decoders/pgoutput.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/wal2json.so ]] && echo %{pginstdir}/lib/citus_decoders/wal2json.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_pgoutput.so ]] && echo %{pginstdir}/lib/citus_pgoutput.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_wal2json.so ]] && echo %{pginstdir}/lib/citus_wal2json.so >> installation_files.list -echo %{pginstdir}/share/extension/%{sname}-*.sql >> installation_files.list -echo %{pginstdir}/share/extension/%{sname}.control >> installation_files.list -# Since files below may be non-existent in some versions, ignoring the error in case of file absence -[[ -f %{buildroot}%{pginstdir}/share/extension/citus_columnar.control ]] && echo %{pginstdir}/share/extension/citus_columnar.control >> installation_files.list -columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "columnar-*.sql"`) -if [ ${{ '{#' }}columnar_sql_files[@]} -gt 0 ]; then - echo %{pginstdir}/share/extension/columnar-*.sql >> installation_files.list -fi - -citus_columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "citus_columnar-*.sql"`) -if [ ${{ '{#' }}citus_columnar_sql_files[@]} -gt 0 ]; then - echo %{pginstdir}/share/extension/citus_columnar-*.sql >> installation_files.list -fi - -[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list -%ifarch ppc64 ppc64le -%else - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - echo %{pginstdir}/lib/bitcode/%{sname}/*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{sname}*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{sname}/*/*.bc >> installation_files.list - - # Columnar does not exist in Citus versions < 10.0 - # At this point, we don't 
have %{pginstdir}, - # so first check build directory for columnar. - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_pgoutput ]] && echo %{pginstdir}/lib/bitcode/citus_pgoutput/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_wal2json ]] && echo %{pginstdir}/lib/bitcode/citus_wal2json/*.bc >> installation_files.list - %endif -%endif - -%clean -%{__rm} -rf %{buildroot} - -%files -f installation_files.list -%files -%defattr(-,root,root,-) -%doc CHANGELOG.md -%if 0%{?rhel} && 0%{?rhel} <= 6 -%doc LICENSE -%else -%license LICENSE -%endif -%doc %{pginstdir}/doc/extension/README-%{sname}.md -%doc %{pginstdir}/doc/extension/NOTICE-%{sname} - -%changelog -{{changelog}} +%global pgmajorversion 11 +%global pgpackageversion 11 +%global pginstdir /usr/pgsql-%{pgpackageversion} +%global sname citus +%global debug_package %{nil} + +Summary: PostgreSQL-based distributed RDBMS +Name: %{sname}%{?pkginfix}_%{pgmajorversion} +Provides: %{sname}_%{pgmajorversion} +Conflicts: %{sname}_%{pgmajorversion} +Version: {{rpm_version}} +Release: {{fancy_version_no}}%{dist} +License: AGPLv3 +Group: Applications/Databases +Source0: https://github.com/citusdata/citus/archive/v{{version}}.tar.gz +URL: https://github.com/citusdata/citus +BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel +Requires: postgresql%{pgmajorversion}-server +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%description +Citus horizontally scales PostgreSQL across commodity servers +using sharding and replication. Its query engine parallelizes +incoming SQL queries across these servers to enable real-time +responses on large datasets. + +Citus extends the underlying database rather than forking it, +which gives developers and enterprises the power and familiarity +of a traditional relational database. As an extension, Citus +supports new PostgreSQL releases, allowing users to benefit from +new features while maintaining compatibility with existing +PostgreSQL tools. Note that Citus supports many (but not all) SQL +commands. 
+ +%prep +%setup -q -n %{sname}-%{version} + +%build + +currentgccver="$(gcc -dumpversion)" +requiredgccver="4.8.2" +if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then + echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags + exit 1 +fi + +%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) +make %{?_smp_mflags} + +%install +%make_install +# Install documentation with a better name: +%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension +%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md +%{__cp} NOTICE %{buildroot}%{pginstdir}/doc/extension/NOTICE-%{sname} +# Set paths to be packaged other than LICENSE, README & CHANGELOG.md +echo %{pginstdir}/include/server/citus_*.h >> installation_files.list +echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list +echo %{pginstdir}/lib/%{sname}.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/pgoutput.so ]] && echo %{pginstdir}/lib/citus_decoders/pgoutput.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/wal2json.so ]] && echo %{pginstdir}/lib/citus_decoders/wal2json.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_pgoutput.so ]] && echo %{pginstdir}/lib/citus_pgoutput.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_wal2json.so ]] && echo %{pginstdir}/lib/citus_wal2json.so >> installation_files.list +echo %{pginstdir}/share/extension/%{sname}-*.sql >> installation_files.list +echo %{pginstdir}/share/extension/%{sname}.control >> installation_files.list +# Since files below may be non-existent in some versions, ignoring the error in case of file absence +[[ -f %{buildroot}%{pginstdir}/share/extension/citus_columnar.control ]] && echo %{pginstdir}/share/extension/citus_columnar.control >> installation_files.list +columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "columnar-*.sql"`) +if [ ${{ '{#' }}columnar_sql_files[@]} -gt 0 ]; then + echo %{pginstdir}/share/extension/columnar-*.sql >> installation_files.list +fi + +citus_columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "citus_columnar-*.sql"`) +if [ ${{ '{#' }}citus_columnar_sql_files[@]} -gt 0 ]; then + echo %{pginstdir}/share/extension/citus_columnar-*.sql >> installation_files.list +fi + +[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list +%ifarch ppc64 ppc64le +%else + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + echo %{pginstdir}/lib/bitcode/%{sname}/*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{sname}*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{sname}/*/*.bc >> installation_files.list + + # Columnar does not exist in Citus versions < 10.0 + # At this point, we don't have %{pginstdir}, + # so first check build directory for columnar. 
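Because this spec is itself a Jinja2 template, the bash array-length expansion ${#arr[@]} has to be written as ${{ '{#' }}arr[@]} so that Jinja does not read {# as the start of a comment. After rendering, the scriptlet simply collects matching SQL files into an array and appends the glob only when the array is non-empty. Stripped of the templating, the rendered bash looks roughly like this (the extension directory is a placeholder):

#!/bin/bash
# Rendered form of the templated array-length check in %install.
# extdir is a placeholder path for illustration.
extdir="/usr/pgsql-11/share/extension"

# Collect matches into a bash array; no matches gives an array of length 0.
columnar_sql_files=( $(find "$extdir" -maxdepth 1 -name "columnar-*.sql") )

# Append the glob only when something matched, so older Citus versions that
# ship no columnar SQL files do not end up with a dangling %files entry.
if [ ${#columnar_sql_files[@]} -gt 0 ]; then
    echo "$extdir/columnar-*.sql" >> installation_files.list
fi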
+ [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_pgoutput ]] && echo %{pginstdir}/lib/bitcode/citus_pgoutput/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_wal2json ]] && echo %{pginstdir}/lib/bitcode/citus_wal2json/*.bc >> installation_files.list + %endif +%endif + +%clean +%{__rm} -rf %{buildroot} + +%files -f installation_files.list +%files +%defattr(-,root,root,-) +%doc CHANGELOG.md +%if 0%{?rhel} && 0%{?rhel} <= 6 +%doc LICENSE +%else +%license LICENSE +%endif +%doc %{pginstdir}/doc/extension/README-%{sname}.md +%doc %{pginstdir}/doc/extension/NOTICE-%{sname} + +%changelog +{{changelog}} diff --git a/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile b/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile index f1d89cf8..9cdd0c1d 100644 --- a/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile +++ b/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile @@ -1,58 +1,58 @@ -# This file is auto generated from it's template, -# see citusdata/tools/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile. -FROM postgres:{{postgres_version}}-alpine -ARG VERSION={{project_version}} -LABEL maintainer="Citus Data https://citusdata.com" \ - org.label-schema.name="Citus" \ - org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ - org.label-schema.url="https://www.citusdata.com" \ - org.label-schema.vcs-url="https://github.com/citusdata/citus" \ - org.label-schema.vendor="Citus Data, Inc." \ - org.label-schema.version=${VERSION}-alpine \ - org.label-schema.schema-version="1.0" - -# Build citus and delete all used libraries. Warning: Libraries installed in this section will be deleted after build completion -RUN apk add --no-cache \ - --virtual builddeps \ - build-base \ - krb5-dev \ - curl \ - curl-dev \ - openssl-dev \ - ca-certificates \ - clang \ - llvm \ - lz4-dev \ - zstd-dev \ - libxslt-dev \ - libxml2-dev \ - icu-dev && \ - apk add --no-cache libcurl && \ - curl -sfLO "https://github.com/citusdata/citus/archive/v${VERSION}.tar.gz" && \ - tar xzf "v${VERSION}.tar.gz" && \ - cd "citus-${VERSION}" && \ - ./configure --with-security-flags && \ - make install && \ - cd .. 
&& \ - rm -rf "citus-${VERSION}" "v${VERSION}.tar.gz" && \ - apk del builddeps - -#--------End of Citus Build - -# add citus to default PostgreSQL config -RUN echo "shared_preload_libraries='citus'" >> /usr/local/share/postgresql/postgresql.conf.sample - -# add scripts to run after initdb -COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ - -# add health check script -COPY pg_healthcheck / - -# entry point unsets PGPASSWORD, but we need it to connect to workers -# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 -RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh - -# Add lz4 dependencies -RUN apk add zstd zstd-dev lz4 lz4-dev - -HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck +# This file is auto generated from it's template, +# see citusdata/tools/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile. +FROM postgres:{{postgres_version}}-alpine +ARG VERSION={{project_version}} +LABEL maintainer="Citus Data https://citusdata.com" \ + org.label-schema.name="Citus" \ + org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ + org.label-schema.url="https://www.citusdata.com" \ + org.label-schema.vcs-url="https://github.com/citusdata/citus" \ + org.label-schema.vendor="Citus Data, Inc." \ + org.label-schema.version=${VERSION}-alpine \ + org.label-schema.schema-version="1.0" + +# Build citus and delete all used libraries. Warning: Libraries installed in this section will be deleted after build completion +RUN apk add --no-cache \ + --virtual builddeps \ + build-base \ + krb5-dev \ + curl \ + curl-dev \ + openssl-dev \ + ca-certificates \ + clang \ + llvm \ + lz4-dev \ + zstd-dev \ + libxslt-dev \ + libxml2-dev \ + icu-dev && \ + apk add --no-cache libcurl && \ + curl -sfLO "https://github.com/citusdata/citus/archive/v${VERSION}.tar.gz" && \ + tar xzf "v${VERSION}.tar.gz" && \ + cd "citus-${VERSION}" && \ + ./configure --with-security-flags && \ + make install && \ + cd .. && \ + rm -rf "citus-${VERSION}" "v${VERSION}.tar.gz" && \ + apk del builddeps + +#--------End of Citus Build + +# add citus to default PostgreSQL config +RUN echo "shared_preload_libraries='citus'" >> /usr/local/share/postgresql/postgresql.conf.sample + +# add scripts to run after initdb +COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ + +# add health check script +COPY pg_healthcheck / + +# entry point unsets PGPASSWORD, but we need it to connect to workers +# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 +RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh + +# Add lz4 dependencies +RUN apk add zstd zstd-dev lz4 lz4-dev + +HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck diff --git a/packaging_automation/templates/docker/latest/docker-compose.tmpl.yml b/packaging_automation/templates/docker/latest/docker-compose.tmpl.yml index 8bec3ad0..5347396f 100644 --- a/packaging_automation/templates/docker/latest/docker-compose.tmpl.yml +++ b/packaging_automation/templates/docker/latest/docker-compose.tmpl.yml @@ -1,34 +1,34 @@ -# This file is auto generated from it's template, -# see citusdata/tools/packaging_automation/templates/docker/latest/docker-compose.tmpl.yml. 
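The Alpine image earlier in this hunk keeps the final layer small by installing the whole build toolchain under one named virtual package, compiling Citus from the release tarball, and deleting that virtual package inside the same RUN instruction so none of the build-only dependencies persist in the image. A trimmed-down sketch of that pattern as plain shell (the package list is abbreviated and VERSION is a placeholder; the real Dockerfile installs the full dependency set):

# Sketch of the apk "--virtual builddeps" build-and-clean pattern from
# alpine.tmpl.dockerfile, written as the body of a single RUN instruction.
VERSION=12.0.0   # placeholder; the Dockerfile takes this from a build ARG
apk add --no-cache --virtual builddeps build-base curl curl-dev clang llvm
curl -sfLO "https://github.com/citusdata/citus/archive/v${VERSION}.tar.gz"
tar xzf "v${VERSION}.tar.gz"
cd "citus-${VERSION}" && ./configure --with-security-flags && make install
cd .. && rm -rf "citus-${VERSION}" "v${VERSION}.tar.gz"
apk del builddeps   # drops every package pulled in under the virtual name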
-version: "3" - -services: - master: - container_name: "${COMPOSE_PROJECT_NAME:-citus}_master" - image: "citusdata/citus:{{project_version}}" - ports: ["${COORDINATOR_EXTERNAL_PORT:-5432}:5432"] - labels: ["com.citusdata.role=Master"] - environment: &AUTH - POSTGRES_USER: "${POSTGRES_USER:-postgres}" - POSTGRES_PASSWORD: "${POSTGRES_PASSWORD}" - PGUSER: "${POSTGRES_USER:-postgres}" - PGPASSWORD: "${POSTGRES_PASSWORD}" - POSTGRES_HOST_AUTH_METHOD: "${POSTGRES_HOST_AUTH_METHOD:-trust}" - worker: - image: "citusdata/citus:{{project_version}}" - labels: ["com.citusdata.role=Worker"] - depends_on: [manager] - environment: *AUTH - command: "/wait-for-manager.sh" - volumes: - - healthcheck-volume:/healthcheck - manager: - container_name: "${COMPOSE_PROJECT_NAME:-citus}_manager" - image: "citusdata/membership-manager:0.3.0" - volumes: - - "${DOCKER_SOCK:-/var/run/docker.sock}:/var/run/docker.sock" - - healthcheck-volume:/healthcheck - depends_on: [master] - environment: *AUTH -volumes: - healthcheck-volume: +# This file is auto generated from it's template, +# see citusdata/tools/packaging_automation/templates/docker/latest/docker-compose.tmpl.yml. +version: "3" + +services: + master: + container_name: "${COMPOSE_PROJECT_NAME:-citus}_master" + image: "citusdata/citus:{{project_version}}" + ports: ["${COORDINATOR_EXTERNAL_PORT:-5432}:5432"] + labels: ["com.citusdata.role=Master"] + environment: &AUTH + POSTGRES_USER: "${POSTGRES_USER:-postgres}" + POSTGRES_PASSWORD: "${POSTGRES_PASSWORD}" + PGUSER: "${POSTGRES_USER:-postgres}" + PGPASSWORD: "${POSTGRES_PASSWORD}" + POSTGRES_HOST_AUTH_METHOD: "${POSTGRES_HOST_AUTH_METHOD:-trust}" + worker: + image: "citusdata/citus:{{project_version}}" + labels: ["com.citusdata.role=Worker"] + depends_on: [manager] + environment: *AUTH + command: "/wait-for-manager.sh" + volumes: + - healthcheck-volume:/healthcheck + manager: + container_name: "${COMPOSE_PROJECT_NAME:-citus}_manager" + image: "citusdata/membership-manager:0.3.0" + volumes: + - "${DOCKER_SOCK:-/var/run/docker.sock}:/var/run/docker.sock" + - healthcheck-volume:/healthcheck + depends_on: [master] + environment: *AUTH +volumes: + healthcheck-volume: diff --git a/packaging_automation/templates/docker/latest/latest.tmpl.dockerfile b/packaging_automation/templates/docker/latest/latest.tmpl.dockerfile index 0e70b981..bb2baf91 100644 --- a/packaging_automation/templates/docker/latest/latest.tmpl.dockerfile +++ b/packaging_automation/templates/docker/latest/latest.tmpl.dockerfile @@ -1,42 +1,42 @@ -# This file is auto generated from it's template, -# see citusdata/tools/packaging_automation/templates/docker/latest/latest.tmpl.dockerfile. -FROM postgres:{{postgres_version}} -ARG VERSION={{project_version}} -LABEL maintainer="Citus Data https://citusdata.com" \ - org.label-schema.name="Citus" \ - org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ - org.label-schema.url="https://www.citusdata.com" \ - org.label-schema.vcs-url="https://github.com/citusdata/citus" \ - org.label-schema.vendor="Citus Data, Inc." 
\ - org.label-schema.version=${VERSION} \ - org.label-schema.schema-version="1.0" - -ENV CITUS_VERSION ${VERSION}.citus-1 - -# install Citus -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - ca-certificates \ - curl \ - && curl -s https://install.citusdata.com/community/deb.sh | bash \ - && apt-get install -y postgresql-$PG_MAJOR-citus-{{project_minor_version}}=$CITUS_VERSION \ - postgresql-$PG_MAJOR-hll=2.17.citus-1 \ - postgresql-$PG_MAJOR-topn=2.5.0.citus-1 \ - && apt-get purge -y --auto-remove curl \ - && rm -rf /var/lib/apt/lists/* - -# add citus to default PostgreSQL config -RUN echo "shared_preload_libraries='citus'" >> /usr/share/postgresql/postgresql.conf.sample - -# add scripts to run after initdb -COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ - -# add health check script -COPY pg_healthcheck wait-for-manager.sh / -RUN chmod +x /wait-for-manager.sh - -# entry point unsets PGPASSWORD, but we need it to connect to workers -# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 -RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh - -HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck +# This file is auto generated from it's template, +# see citusdata/tools/packaging_automation/templates/docker/latest/latest.tmpl.dockerfile. +FROM postgres:{{postgres_version}} +ARG VERSION={{project_version}} +LABEL maintainer="Citus Data https://citusdata.com" \ + org.label-schema.name="Citus" \ + org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ + org.label-schema.url="https://www.citusdata.com" \ + org.label-schema.vcs-url="https://github.com/citusdata/citus" \ + org.label-schema.vendor="Citus Data, Inc." 
\ + org.label-schema.version=${VERSION} \ + org.label-schema.schema-version="1.0" + +ENV CITUS_VERSION ${VERSION}.citus-1 + +# install Citus +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + && curl -s https://install.citusdata.com/community/deb.sh | bash \ + && apt-get install -y postgresql-$PG_MAJOR-citus-{{project_minor_version}}=$CITUS_VERSION \ + postgresql-$PG_MAJOR-hll=2.17.citus-1 \ + postgresql-$PG_MAJOR-topn=2.5.0.citus-1 \ + && apt-get purge -y --auto-remove curl \ + && rm -rf /var/lib/apt/lists/* + +# add citus to default PostgreSQL config +RUN echo "shared_preload_libraries='citus'" >> /usr/share/postgresql/postgresql.conf.sample + +# add scripts to run after initdb +COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ + +# add health check script +COPY pg_healthcheck wait-for-manager.sh / +RUN chmod +x /wait-for-manager.sh + +# entry point unsets PGPASSWORD, but we need it to connect to workers +# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 +RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh + +HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck diff --git a/packaging_automation/templates/docker/postgres-14/postgres-14.tmpl.dockerfile b/packaging_automation/templates/docker/postgres-14/postgres-14.tmpl.dockerfile index 900f3f7d..793959d3 100644 --- a/packaging_automation/templates/docker/postgres-14/postgres-14.tmpl.dockerfile +++ b/packaging_automation/templates/docker/postgres-14/postgres-14.tmpl.dockerfile @@ -1,42 +1,42 @@ -# This file is auto generated from it's template, -# see citusdata/tools/packaging_automation/templates/docker/postgres-14/postgres-14.tmpl.dockerfile. -FROM postgres:{{postgres_version}} -ARG VERSION={{project_version}} -LABEL maintainer="Citus Data https://citusdata.com" \ - org.label-schema.name="Citus" \ - org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ - org.label-schema.url="https://www.citusdata.com" \ - org.label-schema.vcs-url="https://github.com/citusdata/citus" \ - org.label-schema.vendor="Citus Data, Inc." 
\ - org.label-schema.version=${VERSION} \ - org.label-schema.schema-version="1.0" - -ENV CITUS_VERSION ${VERSION}.citus-1 - -# install Citus -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - ca-certificates \ - curl \ - && curl -s https://install.citusdata.com/community/deb.sh | bash \ - && apt-get install -y postgresql-$PG_MAJOR-citus-{{project_minor_version}}=$CITUS_VERSION \ - postgresql-$PG_MAJOR-hll=2.18.citus-1 \ - postgresql-$PG_MAJOR-topn=2.6.0.citus-1 \ - && apt-get purge -y --auto-remove curl \ - && rm -rf /var/lib/apt/lists/* - -# add citus to default PostgreSQL config -RUN echo "shared_preload_libraries='citus'" >> /usr/share/postgresql/postgresql.conf.sample - -# add scripts to run after initdb -COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ - -# add health check script -COPY pg_healthcheck wait-for-manager.sh / -RUN chmod +x /wait-for-manager.sh - -# entry point unsets PGPASSWORD, but we need it to connect to workers -# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 -RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh - -HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck +# This file is auto generated from it's template, +# see citusdata/tools/packaging_automation/templates/docker/postgres-14/postgres-14.tmpl.dockerfile. +FROM postgres:{{postgres_version}} +ARG VERSION={{project_version}} +LABEL maintainer="Citus Data https://citusdata.com" \ + org.label-schema.name="Citus" \ + org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ + org.label-schema.url="https://www.citusdata.com" \ + org.label-schema.vcs-url="https://github.com/citusdata/citus" \ + org.label-schema.vendor="Citus Data, Inc." 
\ + org.label-schema.version=${VERSION} \ + org.label-schema.schema-version="1.0" + +ENV CITUS_VERSION ${VERSION}.citus-1 + +# install Citus +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + && curl -s https://install.citusdata.com/community/deb.sh | bash \ + && apt-get install -y postgresql-$PG_MAJOR-citus-{{project_minor_version}}=$CITUS_VERSION \ + postgresql-$PG_MAJOR-hll=2.18.citus-1 \ + postgresql-$PG_MAJOR-topn=2.6.0.citus-1 \ + && apt-get purge -y --auto-remove curl \ + && rm -rf /var/lib/apt/lists/* + +# add citus to default PostgreSQL config +RUN echo "shared_preload_libraries='citus'" >> /usr/share/postgresql/postgresql.conf.sample + +# add scripts to run after initdb +COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ + +# add health check script +COPY pg_healthcheck wait-for-manager.sh / +RUN chmod +x /wait-for-manager.sh + +# entry point unsets PGPASSWORD, but we need it to connect to workers +# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 +RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh + +HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck diff --git a/packaging_automation/templates/docker/postgres-15/postgres-15.tmpl.dockerfile b/packaging_automation/templates/docker/postgres-15/postgres-15.tmpl.dockerfile index b429fe2b..521d4b00 100644 --- a/packaging_automation/templates/docker/postgres-15/postgres-15.tmpl.dockerfile +++ b/packaging_automation/templates/docker/postgres-15/postgres-15.tmpl.dockerfile @@ -1,42 +1,42 @@ -# This file is auto generated from it's template, -# see citusdata/tools/packaging_automation/templates/docker/postgres-15/postgres-15.tmpl.dockerfile. -FROM postgres:{{postgres_version}} -ARG VERSION={{project_version}} -LABEL maintainer="Citus Data https://citusdata.com" \ - org.label-schema.name="Citus" \ - org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ - org.label-schema.url="https://www.citusdata.com" \ - org.label-schema.vcs-url="https://github.com/citusdata/citus" \ - org.label-schema.vendor="Citus Data, Inc." 
\ - org.label-schema.version=${VERSION} \ - org.label-schema.schema-version="1.0" - -ENV CITUS_VERSION ${VERSION}.citus-1 - -# install Citus -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - ca-certificates \ - curl \ - && curl -s https://install.citusdata.com/community/deb.sh | bash \ - && apt-get install -y postgresql-$PG_MAJOR-citus-{{project_minor_version}}=$CITUS_VERSION \ - postgresql-$PG_MAJOR-hll=2.18.citus-1 \ - postgresql-$PG_MAJOR-topn=2.6.0.citus-1 \ - && apt-get purge -y --auto-remove curl \ - && rm -rf /var/lib/apt/lists/* - -# add citus to default PostgreSQL config -RUN echo "shared_preload_libraries='citus'" >> /usr/share/postgresql/postgresql.conf.sample - -# add scripts to run after initdb -COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ - -# add health check script -COPY pg_healthcheck wait-for-manager.sh / -RUN chmod +x /wait-for-manager.sh - -# entry point unsets PGPASSWORD, but we need it to connect to workers -# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 -RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh - -HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck +# This file is auto generated from it's template, +# see citusdata/tools/packaging_automation/templates/docker/postgres-15/postgres-15.tmpl.dockerfile. +FROM postgres:{{postgres_version}} +ARG VERSION={{project_version}} +LABEL maintainer="Citus Data https://citusdata.com" \ + org.label-schema.name="Citus" \ + org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ + org.label-schema.url="https://www.citusdata.com" \ + org.label-schema.vcs-url="https://github.com/citusdata/citus" \ + org.label-schema.vendor="Citus Data, Inc." 
\ + org.label-schema.version=${VERSION} \ + org.label-schema.schema-version="1.0" + +ENV CITUS_VERSION ${VERSION}.citus-1 + +# install Citus +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + && curl -s https://install.citusdata.com/community/deb.sh | bash \ + && apt-get install -y postgresql-$PG_MAJOR-citus-{{project_minor_version}}=$CITUS_VERSION \ + postgresql-$PG_MAJOR-hll=2.18.citus-1 \ + postgresql-$PG_MAJOR-topn=2.6.0.citus-1 \ + && apt-get purge -y --auto-remove curl \ + && rm -rf /var/lib/apt/lists/* + +# add citus to default PostgreSQL config +RUN echo "shared_preload_libraries='citus'" >> /usr/share/postgresql/postgresql.conf.sample + +# add scripts to run after initdb +COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ + +# add health check script +COPY pg_healthcheck wait-for-manager.sh / +RUN chmod +x /wait-for-manager.sh + +# entry point unsets PGPASSWORD, but we need it to connect to workers +# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 +RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh + +HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck diff --git a/packaging_automation/templates/multi_extension_out_prepare_release.tmpl b/packaging_automation/templates/multi_extension_out_prepare_release.tmpl index 4ca90db5..d81ea93d 100644 --- a/packaging_automation/templates/multi_extension_out_prepare_release.tmpl +++ b/packaging_automation/templates/multi_extension_out_prepare_release.tmpl @@ -1,15 +1,15 @@ --- Test downgrade to {{current_schema_version}} from {{upcoming_minor_version}} -ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; -ALTER EXTENSION citus UPDATE TO '{{current_schema_version}}'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at {{upcoming_minor_version}} -ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; -SELECT * FROM print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) +-- Test downgrade to {{current_schema_version}} from {{upcoming_minor_version}} +ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; +ALTER EXTENSION citus UPDATE TO '{{current_schema_version}}'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at {{upcoming_minor_version}} +ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; +SELECT * FROM print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) diff --git a/packaging_automation/templates/multi_extension_sql_prepare_release.tmpl b/packaging_automation/templates/multi_extension_sql_prepare_release.tmpl index ac9d3644..c5a60a40 100644 --- a/packaging_automation/templates/multi_extension_sql_prepare_release.tmpl +++ b/packaging_automation/templates/multi_extension_sql_prepare_release.tmpl @@ -1,9 +1,9 @@ --- Test downgrade to {{current_schema_version}} from {{upcoming_minor_version}} -ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; -ALTER EXTENSION 
citus UPDATE TO '{{current_schema_version}}'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM print_extension_changes(); - --- Snapshot of state at {{upcoming_minor_version}} -ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; -SELECT * FROM print_extension_changes(); +-- Test downgrade to {{current_schema_version}} from {{upcoming_minor_version}} +ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; +ALTER EXTENSION citus UPDATE TO '{{current_schema_version}}'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM print_extension_changes(); + +-- Snapshot of state at {{upcoming_minor_version}} +ALTER EXTENSION citus UPDATE TO '{{upcoming_minor_version}}'; +SELECT * FROM print_extension_changes(); diff --git a/packaging_automation/templates/pg-auto-failover-enterprise-pkgvars.tmpl b/packaging_automation/templates/pg-auto-failover-enterprise-pkgvars.tmpl index c23b96df..f92ab2bb 100644 --- a/packaging_automation/templates/pg-auto-failover-enterprise-pkgvars.tmpl +++ b/packaging_automation/templates/pg-auto-failover-enterprise-pkgvars.tmpl @@ -1,6 +1,6 @@ -rpm_pkgname=pg-auto-failover-enterprise -deb_pkgname=auto-failover-enterprise -hubproj=citus-ha -pkgdesc='Postgres extension for automated failover and high-availability' -pkglatest={{version}} -versioning=fancy +rpm_pkgname=pg-auto-failover-enterprise +deb_pkgname=auto-failover-enterprise +hubproj=citus-ha +pkgdesc='Postgres extension for automated failover and high-availability' +pkglatest={{version}} +versioning=fancy diff --git a/packaging_automation/templates/pg-auto-failover-enterprise.spec.tmpl b/packaging_automation/templates/pg-auto-failover-enterprise.spec.tmpl index ba28928f..af199873 100644 --- a/packaging_automation/templates/pg-auto-failover-enterprise.spec.tmpl +++ b/packaging_automation/templates/pg-auto-failover-enterprise.spec.tmpl @@ -1,374 +1,374 @@ -%global pgmajorversion 11 -%global pgpackageversion 11 -%global pginstdir /usr/pgsql-%{pgpackageversion} -%global sname pg-auto-failover-enterprise -%global extname pgautofailover -%global debug_package %{nil} -%global unencrypted_package "%{getenv:UNENCRYPTED_PACKAGE}" - -Summary: Auto-HA support for Citus -Name: %{sname}%{?pkginfix}_%{pgmajorversion} -Provides: %{sname}_%{pgmajorversion} -Conflicts: %{sname}_%{pgmajorversion} -Version: {{rpm_version}} -Release: 1%{dist} -License: Commercial -Group: Applications/Databases -Source0: https://github.com/citusdata/citus-ha/archive/v{{version}}.tar.gz -URL: https://github.com/citusdata/citus-ha -BuildRequires: postgresql%{pgmajorversion}-devel postgresql%{pgmajorversion}-server libxml2-devel -BuildRequires: libxslt-devel openssl-devel pam-devel readline-devel -Requires: postgresql%{pgmajorversion}-server postgresql%{pgmajorversion}-contrib openssl -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%description -This extension implements a set of functions to provide High Availability to -Postgres. 
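
The %build scriptlet in this spec gates the build on a minimum GCC version by feeding both version strings through sort -V and checking which one sorts first. As a rough illustration only, not part of the spec and with hypothetical version values, the same comparison could be expressed in Python as:

# Illustrative sketch of the minimum-version check the spec performs with sort -V.
def version_tuple(version: str) -> tuple:
    # "11.3.1" -> (11, 3, 1); gcc -dumpversion may also print just a major number such as "13".
    return tuple(int(part) for part in version.split("."))

def meets_minimum(current: str, required: str) -> bool:
    # True when the current compiler version sorts at or above the required one.
    return version_tuple(current) >= version_tuple(required)

if __name__ == "__main__":
    print(meets_minimum("11.3.1", "4.8.2"))  # True: new enough
    print(meets_minimum("4.8.1", "4.8.2"))   # False: the build should be refused

Note that the spec does not always fail on an old compiler: when UNENCRYPTED_PACKAGE is set it falls back to slower hardening flags (stack-protector-all) and continues instead.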
- -%prep -%setup -q -n %{sname}-%{version} - -%build - -# Flags taken from: https://liquid.microsoft.com/Web/Object/Read/ms.security/Requirements/Microsoft.Security.SystemsADM.10203#guide -SECURITY_CFLAGS="-fstack-protector-strong -D_FORTIFY_SOURCE=2 -O2 -z noexecstack -Wl,-z,relro -Wl,-z,now -Wformat -Wformat-security -Werror=format-security" -SHARED_LIB_SECURITY_CFLAGS="-fpic" -EXECUTABLE_SECURITY_CFLAGS="-fpie -Wl,-pie -Wl,-z,defs" - -currentgccver="$(gcc -dumpversion)" -requiredgccver="4.8.2" -if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then - if [ -z "${UNENCRYPTED_PACKAGE:-}" ]; then - echo ERROR: At least GCC version "$requiredgccver" is needed to build Microsoft packages - exit 1 - else - echo WARNING: Using slower security flags because of outdated compiler - SECURITY_CFLAGS="-fstack-protector-all -D_FORTIFY_SOURCE=2 -O2 -z noexecstack -Wl,-z,relro -Wl,-z,now -Wformat -Wformat-security -Werror=format-security" - fi -fi - -# Check if -Wl,-pie support exists in libpgport.a and remove the flag if it -# does not -# Source: https://stackoverflow.com/a/1351771/2570866 -if ! readelf --relocs %{pginstdir}/lib/libpgport.a | grep -E '(GOT|PLT|JU?MP_SLOT)' > /dev/null; then - echo WARNING: Not compiling with -Wl,pie flag, this is less secure - EXECUTABLE_SECURITY_CFLAGS="-fpie -Wl,-z,defs" -fi - -PATH=%{pginstdir}/bin:$PATH -make -C src/bin/pg_autoctl %{?_smp_mflags} CFLAGS="$SECURITY_CFLAGS $EXECUTABLE_SECURITY_CFLAGS" -make -C src/monitor %{?_smp_mflags} CFLAGS="$SECURITY_CFLAGS $SHARED_LIB_SECURITY_CFLAGS" -%if 0%{?rhel} && 0%{?rhel} <= 6 -%else - export PYTHONPATH=$(echo /usr/local/lib64/python3.*/site-packages):$(echo /usr/local/lib/python3.*/site-packages) - make man -%endif - - -%install -PATH=%{pginstdir}/bin:$PATH -%make_install -# Install documentation with a better name: -%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension -%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{extname}.md - -# install man pages -%if 0%{?rhel} && 0%{?rhel} <= 6 -%else - %{__mkdir} -p %{buildroot}/usr/share/man/man1 - %{__cp} docs/_build/man/pg_auto_failover.1 %{buildroot}/usr/share/man/man1/ - %{__cp} docs/_build/man/pg_autoctl.1 %{buildroot}/usr/share/man/man1/ - %{__mkdir} -p %{buildroot}/usr/share/man/man5 - %{__cp} docs/_build/man/pg_autoctl.5 %{buildroot}/usr/share/man/man5/ -%endif -%if %{unencrypted_package} == "" - -set -eu -set +x - -dir="%{buildroot}" -libdir="$dir/%{pginstdir}/lib" -mkdir -p "$libdir" - -# List all files to be encrypted and store it in the libdir as secret_files_list -secret_files_list="$libdir/pgautofailover_secret_files.metadata" -find "$dir" -iname "*.so" -o -iname "*.bc" -o -iname "*.control" | sed -e "s@^$dir@@g" > "$secret_files_list" - -PACKAGE_ENCRYPTION_KEY="${PACKAGE_ENCRYPTION_KEY:-}" -if [ -z "$PACKAGE_ENCRYPTION_KEY" ]; then - echo "ERROR: The PACKAGE_ENCRYPTION_KEY environment variable needs to be set" - echo "HINT: If trying to build packages locally, just set it to 'abc' or something" - echo "HINT: If you're trying to build unencrypted packages you should set the UNENCRYPTED_PACKAGE environment variable" - exit 1 -fi - -temp_gnupghome="$(mktemp -d)" -encrypt() { - path_unencrypted="$1" - path_encrypted="$1.gpg" - # encrypt the files using password - # --s2k-* options are there to make sure decrypting/encrypting doesn't - # take minutes - gpg --symmetric \ - --batch \ - --no-tty \ - --yes \ - --cipher-algo AES256 \ - --s2k-mode 3 \ - --s2k-count 1000000 \ - --s2k-digest-algo SHA512 \ 
- --passphrase-fd 0 \ - --homedir "$temp_gnupghome" \ - --output "$path_encrypted" \ - "$path_unencrypted" \ - <<< "$PACKAGE_ENCRYPTION_KEY" - - # keep permissions and ownership the same, so we can restore it later - # when decrypting - chmod --reference "$path_unencrypted" "$path_encrypted" - chown --reference "$path_unencrypted" "$path_encrypted" - - # remove the unencrypted file from the package - rm "$path_unencrypted" -} - -while read -r unencrypted_file; do - encrypt "$dir$unencrypted_file" -done < "$secret_files_list" - -encrypt %{buildroot}%{pginstdir}/bin/pg_autoctl -chmod -x %{buildroot}%{pginstdir}/bin/pg_autoctl.gpg - -# remove the temporary gpg directory -rm -rf "$temp_gnupghome" - -bindir="$dir/usr/bin" -mkdir -p "$bindir" - -#------- START OF DECRYPT SCRIPT -------- -# Create file used to decrypt -cat > "$bindir/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup" << EOF -#!/bin/sh - -set -eu - -pg_version=%{pgmajorversion} -libdir="%{pginstdir}/lib" -secret_files_list="\$libdir/pgautofailover_secret_files.metadata" - -# Make sure the script is being run as root -if [ "\$(id -u)" -ne "0" ]; then - echo "ERROR: pg-auto-failover-enterprise-pg-\$pg_version-setup needs to be run as root" - echo "HINT: try running \"sudo pg-auto-failover-enterprise-pg-\$pg_version-setup\" instead" - exit 1 -fi - - -echo " -Your use of this software is subject to the terms and conditions of the license -agreement by which you acquired this software. If you are a volume license -customer, use of this software is subject to your volume license agreement. -You may not use this software if you have not validly acquired a license for -the software from Microsoft or its licensed distributors. - -BY USING THE SOFTWARE, YOU ACCEPT THIS AGREEMENT. -" - -PGAUTOFAILOVER_ACCEPT_LICENSE="\${PGAUTOFAILOVER_ACCEPT_LICENSE:-}" - -interactive_license=false -while [ -z "\$PGAUTOFAILOVER_ACCEPT_LICENSE" ]; do - interactive_license=true - echo "Do you accept these terms? YES/NO" - read -r PGAUTOFAILOVER_ACCEPT_LICENSE -done - -case "\$PGAUTOFAILOVER_ACCEPT_LICENSE" in - YES );; - y|Y|Yes|yes ) - echo "ERROR: Only YES is accepted (all capital letters)" - exit 1; - ;; - * ) - echo "ERROR: Terms of the software must be accepted" - exit 1 -esac - -if [ \$interactive_license = false ]; then - echo "Accepted terms by using PGAUTOFAILOVER_ACCEPT_LICENSE=YES environment variable" -fi - -encryption_disclaimer_text=" -Since pg_auto_failover manages failovers, data is sent over the network between -nodes. It is YOUR RESPONSIBILITY as an operator to ensure that this traffic is -secure. - -Since pg_auto_failover version 1.3.0 (released 2020-05-07) the traffic between -the different nodes in the cluster is encrypted automatically when using the ---ssl-self-signed flag to create the nodes in the cluster. This is done by -using TLS with self-signed certificates. This means that this does NOT protect -against Man-In-The-Middle attacks. This only protects against passive -eavesdropping on the network. - -This automatic TLS setup of self-signed certificates and TLS is NOT DONE when -the cluster was originally created with a pg_auto_failover version before -1.3.0. Even when the cluster is later upgraded to version 1.3.0 or higher. -This is to make sure partially upgraded clusters continue to work. - -To enable TLS on these clusters you can use the 'pg_autoctl enable ssl' -command. 
It's usage is explained in detail here: -https://pg-auto-failover.readthedocs.io/en/stable/security.html#enable-ssl-connections-on-an-existing-setup - -Keep in mind that when using --ssl-self-signed the clusters is not safe from -Man-In-The-Middle attacks. To secure the traffic completely you need to follow -the practices outlined here: -https://pg-auto-failover.readthedocs.io/en/stable/security.html#using-your-own-ssl-certificates - -Please confirm that you have read this and understand that you should set up -TLS yourself to send traffic between nodes securely: -YES/NO?" - -PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER="\${PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER:-}" -while [ -z "\$PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER" ]; do - echo "\$encryption_disclaimer_text" - read -r PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER -done - -case "\$PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER" in - YES );; - y|Y|Yes|yes ) - echo "ERROR: Only YES is accepted (all capital letters)" - exit 1; - ;; - * ) - echo "ERROR: Disclaimer about encrypted traffic must be accepted before installing" - exit 1 -esac - -# create a temporary directory for gpg to use so it doesn't output warnings -temp_gnupghome="\$(mktemp -d)" -PGAUTOFAILOVER_LICENSE_KEY="\${PGAUTOFAILOVER_LICENSE_KEY:-}" -while [ -z "\$PGAUTOFAILOVER_LICENSE_KEY" ]; do - echo '' - echo 'Please enter license key:' - read -r PGAUTOFAILOVER_LICENSE_KEY -done - -# Try to decrypt the first file in the list to check if the key is correct -if ! gpg --output "/dev/null" \ - --batch --no-tty --yes --quiet \ - --passphrase "\$PGAUTOFAILOVER_LICENSE_KEY" \ - --homedir "\$temp_gnupghome" \ - --decrypt "\$(head -n 1 "\$secret_files_list").gpg" 2> /dev/null; then - echo "ERROR: Invalid license key supplied" - exit 1 -fi - -echo "License key is valid" -echo "Installing..." 
- -decrypt() { - path_unencrypted="\$1" - path_encrypted="\$path_unencrypted.gpg" - # decrypt the encrypted file - gpg --output "\$path_unencrypted" \ - --batch --no-tty --yes --quiet \ - --passphrase "\$PGAUTOFAILOVER_LICENSE_KEY" \ - --homedir "\$temp_gnupghome" \ - --decrypt "\$path_encrypted" - - # restore permissions and ownership - chmod --reference "\$path_encrypted" "\$path_unencrypted" - chown --reference "\$path_encrypted" "\$path_unencrypted" -} - -# Decrypt all the encrypted files -while read -r path_unencrypted; do - decrypt "\$path_unencrypted" -done < "\$secret_files_list" - -decrypt %{pginstdir}/bin/pg_autoctl -chmod +x %{pginstdir}/bin/pg_autoctl - - -# remove the temporary gpg directory -rm -rf "\$temp_gnupghome" -EOF - -chmod +x "$bindir/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup" - -cat "$bindir/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup" - -%post -installation_message=" -+--------------------------------------------------------------+ -Please run 'sudo pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup' -to complete the setup of pg_auto_failover enterprise -+--------------------------------------------------------------+ -" -echo "$installation_message" - - -%preun -libdir="%{pginstdir}/lib" - -secret_files_list="$libdir/pgautofailover_secret_files.metadata" - -# Cleanup all de decrypted files since these are not managed by the package -# manager and would be left around otherwise -while read -r path_unencrypted; do - rm -f "$path_unencrypted" -done < "$secret_files_list" - -rm -f %{pginstdir}/bin/pg_autoctl -%endif - -%clean -%{__rm} -rf %{buildroot} - -%files -%defattr(-,root,root,-) -%doc %{pginstdir}/doc/extension/README-%{extname}.md -%if 0%{?rhel} && 0%{?rhel} <= 6 -%else - %doc /usr/share/man/man1/pg_auto_failover.1.gz - %doc /usr/share/man/man1/pg_autoctl.1.gz - %doc /usr/share/man/man5/pg_autoctl.5.gz -%endif -%{pginstdir}/share/extension/%{extname}-*.sql -%if %{unencrypted_package} != "" - %{pginstdir}/lib/%{extname}.so - %{pginstdir}/share/extension/%{extname}.control - %{pginstdir}/bin/pg_autoctl - %ifarch ppc64 ppc64le - %else - %if %{pgmajorversion} >= 11 && %{pgmajorversion} < 90 - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - %{pginstdir}/lib/bitcode/%{extname}*.bc - %{pginstdir}/lib/bitcode/%{extname}/*.bc - %endif - %endif - %endif -%else - /usr/bin/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup - %{pginstdir}/lib/pgautofailover_secret_files.metadata - %{pginstdir}/lib/%{extname}.so.gpg - %{pginstdir}/share/extension/%{extname}.control.gpg - %{pginstdir}/bin/pg_autoctl.gpg - %ifarch ppc64 ppc64le - %else - %if %{pgmajorversion} >= 11 && %{pgmajorversion} < 90 - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - %{pginstdir}/lib/bitcode/%{extname}*.bc.gpg - %{pginstdir}/lib/bitcode/%{extname}/*.bc.gpg - %endif - %endif - %endif -%endif - - - -%changelog -{{changelog}} +%global pgmajorversion 11 +%global pgpackageversion 11 +%global pginstdir /usr/pgsql-%{pgpackageversion} +%global sname pg-auto-failover-enterprise +%global extname pgautofailover +%global debug_package %{nil} +%global unencrypted_package "%{getenv:UNENCRYPTED_PACKAGE}" + +Summary: Auto-HA support for Citus +Name: %{sname}%{?pkginfix}_%{pgmajorversion} +Provides: %{sname}_%{pgmajorversion} +Conflicts: %{sname}_%{pgmajorversion} +Version: {{rpm_version}} +Release: 1%{dist} +License: Commercial +Group: Applications/Databases +Source0: https://github.com/citusdata/citus-ha/archive/v{{version}}.tar.gz +URL: https://github.com/citusdata/citus-ha +BuildRequires: 
postgresql%{pgmajorversion}-devel postgresql%{pgmajorversion}-server libxml2-devel +BuildRequires: libxslt-devel openssl-devel pam-devel readline-devel +Requires: postgresql%{pgmajorversion}-server postgresql%{pgmajorversion}-contrib openssl +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%description +This extension implements a set of functions to provide High Availability to +Postgres. + +%prep +%setup -q -n %{sname}-%{version} + +%build + +# Flags taken from: https://liquid.microsoft.com/Web/Object/Read/ms.security/Requirements/Microsoft.Security.SystemsADM.10203#guide +SECURITY_CFLAGS="-fstack-protector-strong -D_FORTIFY_SOURCE=2 -O2 -z noexecstack -Wl,-z,relro -Wl,-z,now -Wformat -Wformat-security -Werror=format-security" +SHARED_LIB_SECURITY_CFLAGS="-fpic" +EXECUTABLE_SECURITY_CFLAGS="-fpie -Wl,-pie -Wl,-z,defs" + +currentgccver="$(gcc -dumpversion)" +requiredgccver="4.8.2" +if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then + if [ -z "${UNENCRYPTED_PACKAGE:-}" ]; then + echo ERROR: At least GCC version "$requiredgccver" is needed to build Microsoft packages + exit 1 + else + echo WARNING: Using slower security flags because of outdated compiler + SECURITY_CFLAGS="-fstack-protector-all -D_FORTIFY_SOURCE=2 -O2 -z noexecstack -Wl,-z,relro -Wl,-z,now -Wformat -Wformat-security -Werror=format-security" + fi +fi + +# Check if -Wl,-pie support exists in libpgport.a and remove the flag if it +# does not +# Source: https://stackoverflow.com/a/1351771/2570866 +if ! readelf --relocs %{pginstdir}/lib/libpgport.a | grep -E '(GOT|PLT|JU?MP_SLOT)' > /dev/null; then + echo WARNING: Not compiling with -Wl,pie flag, this is less secure + EXECUTABLE_SECURITY_CFLAGS="-fpie -Wl,-z,defs" +fi + +PATH=%{pginstdir}/bin:$PATH +make -C src/bin/pg_autoctl %{?_smp_mflags} CFLAGS="$SECURITY_CFLAGS $EXECUTABLE_SECURITY_CFLAGS" +make -C src/monitor %{?_smp_mflags} CFLAGS="$SECURITY_CFLAGS $SHARED_LIB_SECURITY_CFLAGS" +%if 0%{?rhel} && 0%{?rhel} <= 6 +%else + export PYTHONPATH=$(echo /usr/local/lib64/python3.*/site-packages):$(echo /usr/local/lib/python3.*/site-packages) + make man +%endif + + +%install +PATH=%{pginstdir}/bin:$PATH +%make_install +# Install documentation with a better name: +%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension +%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{extname}.md + +# install man pages +%if 0%{?rhel} && 0%{?rhel} <= 6 +%else + %{__mkdir} -p %{buildroot}/usr/share/man/man1 + %{__cp} docs/_build/man/pg_auto_failover.1 %{buildroot}/usr/share/man/man1/ + %{__cp} docs/_build/man/pg_autoctl.1 %{buildroot}/usr/share/man/man1/ + %{__mkdir} -p %{buildroot}/usr/share/man/man5 + %{__cp} docs/_build/man/pg_autoctl.5 %{buildroot}/usr/share/man/man5/ +%endif +%if %{unencrypted_package} == "" + +set -eu +set +x + +dir="%{buildroot}" +libdir="$dir/%{pginstdir}/lib" +mkdir -p "$libdir" + +# List all files to be encrypted and store it in the libdir as secret_files_list +secret_files_list="$libdir/pgautofailover_secret_files.metadata" +find "$dir" -iname "*.so" -o -iname "*.bc" -o -iname "*.control" | sed -e "s@^$dir@@g" > "$secret_files_list" + +PACKAGE_ENCRYPTION_KEY="${PACKAGE_ENCRYPTION_KEY:-}" +if [ -z "$PACKAGE_ENCRYPTION_KEY" ]; then + echo "ERROR: The PACKAGE_ENCRYPTION_KEY environment variable needs to be set" + echo "HINT: If trying to build packages locally, just set it to 'abc' or something" + echo "HINT: If you're trying to build unencrypted packages you should set the 
UNENCRYPTED_PACKAGE environment variable" + exit 1 +fi + +temp_gnupghome="$(mktemp -d)" +encrypt() { + path_unencrypted="$1" + path_encrypted="$1.gpg" + # encrypt the files using password + # --s2k-* options are there to make sure decrypting/encrypting doesn't + # take minutes + gpg --symmetric \ + --batch \ + --no-tty \ + --yes \ + --cipher-algo AES256 \ + --s2k-mode 3 \ + --s2k-count 1000000 \ + --s2k-digest-algo SHA512 \ + --passphrase-fd 0 \ + --homedir "$temp_gnupghome" \ + --output "$path_encrypted" \ + "$path_unencrypted" \ + <<< "$PACKAGE_ENCRYPTION_KEY" + + # keep permissions and ownership the same, so we can restore it later + # when decrypting + chmod --reference "$path_unencrypted" "$path_encrypted" + chown --reference "$path_unencrypted" "$path_encrypted" + + # remove the unencrypted file from the package + rm "$path_unencrypted" +} + +while read -r unencrypted_file; do + encrypt "$dir$unencrypted_file" +done < "$secret_files_list" + +encrypt %{buildroot}%{pginstdir}/bin/pg_autoctl +chmod -x %{buildroot}%{pginstdir}/bin/pg_autoctl.gpg + +# remove the temporary gpg directory +rm -rf "$temp_gnupghome" + +bindir="$dir/usr/bin" +mkdir -p "$bindir" + +#------- START OF DECRYPT SCRIPT -------- +# Create file used to decrypt +cat > "$bindir/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup" << EOF +#!/bin/sh + +set -eu + +pg_version=%{pgmajorversion} +libdir="%{pginstdir}/lib" +secret_files_list="\$libdir/pgautofailover_secret_files.metadata" + +# Make sure the script is being run as root +if [ "\$(id -u)" -ne "0" ]; then + echo "ERROR: pg-auto-failover-enterprise-pg-\$pg_version-setup needs to be run as root" + echo "HINT: try running \"sudo pg-auto-failover-enterprise-pg-\$pg_version-setup\" instead" + exit 1 +fi + + +echo " +Your use of this software is subject to the terms and conditions of the license +agreement by which you acquired this software. If you are a volume license +customer, use of this software is subject to your volume license agreement. +You may not use this software if you have not validly acquired a license for +the software from Microsoft or its licensed distributors. + +BY USING THE SOFTWARE, YOU ACCEPT THIS AGREEMENT. +" + +PGAUTOFAILOVER_ACCEPT_LICENSE="\${PGAUTOFAILOVER_ACCEPT_LICENSE:-}" + +interactive_license=false +while [ -z "\$PGAUTOFAILOVER_ACCEPT_LICENSE" ]; do + interactive_license=true + echo "Do you accept these terms? YES/NO" + read -r PGAUTOFAILOVER_ACCEPT_LICENSE +done + +case "\$PGAUTOFAILOVER_ACCEPT_LICENSE" in + YES );; + y|Y|Yes|yes ) + echo "ERROR: Only YES is accepted (all capital letters)" + exit 1; + ;; + * ) + echo "ERROR: Terms of the software must be accepted" + exit 1 +esac + +if [ \$interactive_license = false ]; then + echo "Accepted terms by using PGAUTOFAILOVER_ACCEPT_LICENSE=YES environment variable" +fi + +encryption_disclaimer_text=" +Since pg_auto_failover manages failovers, data is sent over the network between +nodes. It is YOUR RESPONSIBILITY as an operator to ensure that this traffic is +secure. + +Since pg_auto_failover version 1.3.0 (released 2020-05-07) the traffic between +the different nodes in the cluster is encrypted automatically when using the +--ssl-self-signed flag to create the nodes in the cluster. This is done by +using TLS with self-signed certificates. This means that this does NOT protect +against Man-In-The-Middle attacks. This only protects against passive +eavesdropping on the network. 
+ +This automatic TLS setup of self-signed certificates and TLS is NOT DONE when +the cluster was originally created with a pg_auto_failover version before +1.3.0. Even when the cluster is later upgraded to version 1.3.0 or higher. +This is to make sure partially upgraded clusters continue to work. + +To enable TLS on these clusters you can use the 'pg_autoctl enable ssl' +command. It's usage is explained in detail here: +https://pg-auto-failover.readthedocs.io/en/stable/security.html#enable-ssl-connections-on-an-existing-setup + +Keep in mind that when using --ssl-self-signed the clusters is not safe from +Man-In-The-Middle attacks. To secure the traffic completely you need to follow +the practices outlined here: +https://pg-auto-failover.readthedocs.io/en/stable/security.html#using-your-own-ssl-certificates + +Please confirm that you have read this and understand that you should set up +TLS yourself to send traffic between nodes securely: +YES/NO?" + +PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER="\${PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER:-}" +while [ -z "\$PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER" ]; do + echo "\$encryption_disclaimer_text" + read -r PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER +done + +case "\$PGAUTOFAILOVER_ACCEPT_ENCRYPTION_DISCLAIMER" in + YES );; + y|Y|Yes|yes ) + echo "ERROR: Only YES is accepted (all capital letters)" + exit 1; + ;; + * ) + echo "ERROR: Disclaimer about encrypted traffic must be accepted before installing" + exit 1 +esac + +# create a temporary directory for gpg to use so it doesn't output warnings +temp_gnupghome="\$(mktemp -d)" +PGAUTOFAILOVER_LICENSE_KEY="\${PGAUTOFAILOVER_LICENSE_KEY:-}" +while [ -z "\$PGAUTOFAILOVER_LICENSE_KEY" ]; do + echo '' + echo 'Please enter license key:' + read -r PGAUTOFAILOVER_LICENSE_KEY +done + +# Try to decrypt the first file in the list to check if the key is correct +if ! gpg --output "/dev/null" \ + --batch --no-tty --yes --quiet \ + --passphrase "\$PGAUTOFAILOVER_LICENSE_KEY" \ + --homedir "\$temp_gnupghome" \ + --decrypt "\$(head -n 1 "\$secret_files_list").gpg" 2> /dev/null; then + echo "ERROR: Invalid license key supplied" + exit 1 +fi + +echo "License key is valid" +echo "Installing..." 
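
At build time the scriptlets above encrypt every .so, .bc and .control file (plus pg_autoctl) with gpg --symmetric, and the generated setup script validates the license key by test-decrypting the first file named in pgautofailover_secret_files.metadata before the decrypt() helper below restores everything. The following standalone sketch of that encrypt-then-validate roundtrip is purely illustrative: it assumes gpg is on PATH, uses placeholder file contents and passphrases, and omits the --s2k tuning options used in the spec.

# Illustrative sketch only; not shipped in the package.
import subprocess
import tempfile

def gpg_encrypt(path: str, passphrase: str, homedir: str) -> None:
    # Symmetric AES256 encryption with the passphrase fed on stdin (fd 0), as in the spec.
    # --pinentry-mode loopback is needed for non-interactive passphrases on gpg >= 2.1.
    subprocess.run(
        ["gpg", "--symmetric", "--batch", "--no-tty", "--yes",
         "--pinentry-mode", "loopback", "--cipher-algo", "AES256",
         "--passphrase-fd", "0", "--homedir", homedir,
         "--output", path + ".gpg", path],
        input=passphrase.encode(), check=True)

def passphrase_is_valid(encrypted_path: str, passphrase: str, homedir: str) -> bool:
    # Try decrypting to /dev/null; a zero exit code means the supplied key is correct.
    result = subprocess.run(
        ["gpg", "--output", "/dev/null", "--batch", "--no-tty", "--yes", "--quiet",
         "--pinentry-mode", "loopback", "--passphrase", passphrase,
         "--homedir", homedir, "--decrypt", encrypted_path],
        stderr=subprocess.DEVNULL)
    return result.returncode == 0

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as gnupghome, \
            tempfile.NamedTemporaryFile(suffix=".so") as artifact:
        artifact.write(b"placeholder shared object")  # stand-in for a real .so file
        artifact.flush()
        gpg_encrypt(artifact.name, "abc", gnupghome)
        print(passphrase_is_valid(artifact.name + ".gpg", "abc", gnupghome))    # True
        print(passphrase_is_valid(artifact.name + ".gpg", "wrong", gnupghome))  # False

Feeding the passphrase over a file descriptor (stdin here) rather than on the command line keeps it out of the process list, which is presumably why the spec uses --passphrase-fd 0 on the encryption side.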
+ +decrypt() { + path_unencrypted="\$1" + path_encrypted="\$path_unencrypted.gpg" + # decrypt the encrypted file + gpg --output "\$path_unencrypted" \ + --batch --no-tty --yes --quiet \ + --passphrase "\$PGAUTOFAILOVER_LICENSE_KEY" \ + --homedir "\$temp_gnupghome" \ + --decrypt "\$path_encrypted" + + # restore permissions and ownership + chmod --reference "\$path_encrypted" "\$path_unencrypted" + chown --reference "\$path_encrypted" "\$path_unencrypted" +} + +# Decrypt all the encrypted files +while read -r path_unencrypted; do + decrypt "\$path_unencrypted" +done < "\$secret_files_list" + +decrypt %{pginstdir}/bin/pg_autoctl +chmod +x %{pginstdir}/bin/pg_autoctl + + +# remove the temporary gpg directory +rm -rf "\$temp_gnupghome" +EOF + +chmod +x "$bindir/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup" + +cat "$bindir/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup" + +%post +installation_message=" ++--------------------------------------------------------------+ +Please run 'sudo pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup' +to complete the setup of pg_auto_failover enterprise ++--------------------------------------------------------------+ +" +echo "$installation_message" + + +%preun +libdir="%{pginstdir}/lib" + +secret_files_list="$libdir/pgautofailover_secret_files.metadata" + +# Cleanup all de decrypted files since these are not managed by the package +# manager and would be left around otherwise +while read -r path_unencrypted; do + rm -f "$path_unencrypted" +done < "$secret_files_list" + +rm -f %{pginstdir}/bin/pg_autoctl +%endif + +%clean +%{__rm} -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc %{pginstdir}/doc/extension/README-%{extname}.md +%if 0%{?rhel} && 0%{?rhel} <= 6 +%else + %doc /usr/share/man/man1/pg_auto_failover.1.gz + %doc /usr/share/man/man1/pg_autoctl.1.gz + %doc /usr/share/man/man5/pg_autoctl.5.gz +%endif +%{pginstdir}/share/extension/%{extname}-*.sql +%if %{unencrypted_package} != "" + %{pginstdir}/lib/%{extname}.so + %{pginstdir}/share/extension/%{extname}.control + %{pginstdir}/bin/pg_autoctl + %ifarch ppc64 ppc64le + %else + %if %{pgmajorversion} >= 11 && %{pgmajorversion} < 90 + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + %{pginstdir}/lib/bitcode/%{extname}*.bc + %{pginstdir}/lib/bitcode/%{extname}/*.bc + %endif + %endif + %endif +%else + /usr/bin/pg-auto-failover-enterprise-pg-%{pgmajorversion}-setup + %{pginstdir}/lib/pgautofailover_secret_files.metadata + %{pginstdir}/lib/%{extname}.so.gpg + %{pginstdir}/share/extension/%{extname}.control.gpg + %{pginstdir}/bin/pg_autoctl.gpg + %ifarch ppc64 ppc64le + %else + %if %{pgmajorversion} >= 11 && %{pgmajorversion} < 90 + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + %{pginstdir}/lib/bitcode/%{extname}*.bc.gpg + %{pginstdir}/lib/bitcode/%{extname}/*.bc.gpg + %endif + %endif + %endif +%endif + + + +%changelog +{{changelog}} diff --git a/packaging_automation/templates/pg-auto-failover-pkgvars.tmpl b/packaging_automation/templates/pg-auto-failover-pkgvars.tmpl index eb229c1d..b1bee84f 100644 --- a/packaging_automation/templates/pg-auto-failover-pkgvars.tmpl +++ b/packaging_automation/templates/pg-auto-failover-pkgvars.tmpl @@ -1,6 +1,6 @@ -rpm_pkgname=pg-auto-failover -deb_pkgname=auto-failover -hubproj=pg_auto_failover -pkgdesc='Postgres extension for automated failover and high-availability' -pkglatest={{version}} -versioning=fancy +rpm_pkgname=pg-auto-failover +deb_pkgname=auto-failover +hubproj=pg_auto_failover +pkgdesc='Postgres extension for automated failover and 
high-availability' +pkglatest={{version}} +versioning=fancy diff --git a/packaging_automation/templates/pg-auto-failover.spec.tmpl b/packaging_automation/templates/pg-auto-failover.spec.tmpl index 25e96132..b4c4319d 100644 --- a/packaging_automation/templates/pg-auto-failover.spec.tmpl +++ b/packaging_automation/templates/pg-auto-failover.spec.tmpl @@ -1,85 +1,85 @@ -%global pgmajorversion 11 -%global pgpackageversion 11 -%global pginstdir /usr/pgsql-%{pgpackageversion} -%global sname pg-auto-failover -%global extname pgautofailover -%global debug_package %{nil} - -Summary: Postgres extension for automated failover and high-availability -Name: %{sname}%{?pkginfix}_%{pgmajorversion} -Provides: %{sname}_%{pgmajorversion} -Conflicts: %{sname}_%{pgmajorversion} -Version: {{rpm_version}} -Release: 1%{dist} -License: PostgreSQL -Group: Applications/Databases -Source0: https://github.com/citusdata/pg_auto_failover/archive/v{{version}}.tar.gz -URL: https://github.com/citusdata/pg_auto_failover -BuildRequires: postgresql%{pgmajorversion}-devel postgresql%{pgmajorversion}-server libxml2-devel -BuildRequires: libxslt-devel openssl-devel pam-devel readline-devel -Requires: postgresql%{pgmajorversion}-server postgresql%{pgmajorversion}-contrib openssl -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%description -This extension implements a set of functions to provide High Availability to -Postgres. - -%prep -%setup -q -n %{sname}-%{version} - -%build -PATH=%{pginstdir}/bin:$PATH -make %{?_smp_mflags} -%if 0%{?rhel} && 0%{?rhel} <= 6 -%else - export PYTHONPATH=$(echo /usr/local/lib64/python3.*/site-packages):$(echo /usr/local/lib/python3.*/site-packages) - make man -%endif - -%install -PATH=%{pginstdir}/bin:$PATH -%make_install -# Install documentation with a better name: -%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension -%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{extname}.md - -# install man pages -%if 0%{?rhel} && 0%{?rhel} <= 6 -%else - %{__mkdir} -p %{buildroot}/usr/share/man/man1 - %{__cp} docs/_build/man/pg_auto_failover.1 %{buildroot}/usr/share/man/man1/ - %{__cp} docs/_build/man/pg_autoctl.1 %{buildroot}/usr/share/man/man1/ - %{__mkdir} -p %{buildroot}/usr/share/man/man5 - %{__cp} docs/_build/man/pg_autoctl.5 %{buildroot}/usr/share/man/man5/ -%endif - -%clean -%{__rm} -rf %{buildroot} - -%files -%defattr(-,root,root,-) -%doc %{pginstdir}/doc/extension/README-%{extname}.md -%if 0%{?rhel} && 0%{?rhel} <= 6 -%else - %doc /usr/share/man/man1/pg_auto_failover.1.gz - %doc /usr/share/man/man1/pg_autoctl.1.gz - %doc /usr/share/man/man5/pg_autoctl.5.gz -%endif -%{pginstdir}/lib/%{extname}.so -%{pginstdir}/share/extension/%{extname}-*.sql -%{pginstdir}/share/extension/%{extname}.control -%{pginstdir}/bin/pg_autoctl -%ifarch ppc64 ppc64le - %else - %if %{pgmajorversion} >= 11 && %{pgmajorversion} < 90 - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - %{pginstdir}/lib/bitcode/%{extname}*.bc - %{pginstdir}/lib/bitcode/%{extname}/*.bc - %endif - %endif -%endif - - -%changelog -{{changelog}} +%global pgmajorversion 11 +%global pgpackageversion 11 +%global pginstdir /usr/pgsql-%{pgpackageversion} +%global sname pg-auto-failover +%global extname pgautofailover +%global debug_package %{nil} + +Summary: Postgres extension for automated failover and high-availability +Name: %{sname}%{?pkginfix}_%{pgmajorversion} +Provides: %{sname}_%{pgmajorversion} +Conflicts: %{sname}_%{pgmajorversion} +Version: {{rpm_version}} +Release: 1%{dist} +License: PostgreSQL +Group: 
Applications/Databases +Source0: https://github.com/citusdata/pg_auto_failover/archive/v{{version}}.tar.gz +URL: https://github.com/citusdata/pg_auto_failover +BuildRequires: postgresql%{pgmajorversion}-devel postgresql%{pgmajorversion}-server libxml2-devel +BuildRequires: libxslt-devel openssl-devel pam-devel readline-devel +Requires: postgresql%{pgmajorversion}-server postgresql%{pgmajorversion}-contrib openssl +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%description +This extension implements a set of functions to provide High Availability to +Postgres. + +%prep +%setup -q -n %{sname}-%{version} + +%build +PATH=%{pginstdir}/bin:$PATH +make %{?_smp_mflags} +%if 0%{?rhel} && 0%{?rhel} <= 6 +%else + export PYTHONPATH=$(echo /usr/local/lib64/python3.*/site-packages):$(echo /usr/local/lib/python3.*/site-packages) + make man +%endif + +%install +PATH=%{pginstdir}/bin:$PATH +%make_install +# Install documentation with a better name: +%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension +%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{extname}.md + +# install man pages +%if 0%{?rhel} && 0%{?rhel} <= 6 +%else + %{__mkdir} -p %{buildroot}/usr/share/man/man1 + %{__cp} docs/_build/man/pg_auto_failover.1 %{buildroot}/usr/share/man/man1/ + %{__cp} docs/_build/man/pg_autoctl.1 %{buildroot}/usr/share/man/man1/ + %{__mkdir} -p %{buildroot}/usr/share/man/man5 + %{__cp} docs/_build/man/pg_autoctl.5 %{buildroot}/usr/share/man/man5/ +%endif + +%clean +%{__rm} -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc %{pginstdir}/doc/extension/README-%{extname}.md +%if 0%{?rhel} && 0%{?rhel} <= 6 +%else + %doc /usr/share/man/man1/pg_auto_failover.1.gz + %doc /usr/share/man/man1/pg_autoctl.1.gz + %doc /usr/share/man/man5/pg_autoctl.5.gz +%endif +%{pginstdir}/lib/%{extname}.so +%{pginstdir}/share/extension/%{extname}-*.sql +%{pginstdir}/share/extension/%{extname}.control +%{pginstdir}/bin/pg_autoctl +%ifarch ppc64 ppc64le + %else + %if %{pgmajorversion} >= 11 && %{pgmajorversion} < 90 + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + %{pginstdir}/lib/bitcode/%{extname}*.bc + %{pginstdir}/lib/bitcode/%{extname}/*.bc + %endif + %endif +%endif + + +%changelog +{{changelog}} diff --git a/packaging_automation/templates/pgxn/META.tmpl.json b/packaging_automation/templates/pgxn/META.tmpl.json index 4683b5d1..29446af7 100644 --- a/packaging_automation/templates/pgxn/META.tmpl.json +++ b/packaging_automation/templates/pgxn/META.tmpl.json @@ -1,54 +1,54 @@ -{ - "name": "citus", - "abstract": "Scalable PostgreSQL for real-time workloads", - "description": "Citus horizontally scales PostgreSQL across commodity servers using sharding and replication. 
Its query engine parallelizes incoming SQL queries across these servers to enable real-time responses on large datasets.", - "version": "{{project_version}}", - "maintainer": "\"Citus Data\" ", - "license": "agpl_3", - "provides": { - "citus": { - "abstract": "Citus Distributed Database", - "file": "citus.so", - "docfile": "README.md", - "version": "{{project_version}}" - } - }, - "prereqs": { - "runtime": { - "requires": { - "PostgreSQL": "13.0.0" - } - }, - "test": { - "requires": { - "plpgsql": 0 - } - } - }, - "release_status": "stable", - "resources": { - "homepage": "https://www.citusdata.com", - "bugtracker": { - "web": "https://github.com/citusdata/citus/issues", - "mailto": "support@citusdata.com" - }, - "repository": { - "url": "git://github.com/citusdata/citus.git", - "web": "https://github.com/citusdata/citus", - "type": "git" - } - }, - "generated_by": "David E. Wheeler", - "meta-spec": { - "version": "1.0.0", - "url": "http://pgxn.org/meta/spec.txt" - }, - "tags": [ - "sharding", - "replication", - "parallel", - "distributed", - "horizontal", - "analytics" - ] -} +{ + "name": "citus", + "abstract": "Scalable PostgreSQL for real-time workloads", + "description": "Citus horizontally scales PostgreSQL across commodity servers using sharding and replication. Its query engine parallelizes incoming SQL queries across these servers to enable real-time responses on large datasets.", + "version": "{{project_version}}", + "maintainer": "\"Citus Data\" ", + "license": "agpl_3", + "provides": { + "citus": { + "abstract": "Citus Distributed Database", + "file": "citus.so", + "docfile": "README.md", + "version": "{{project_version}}" + } + }, + "prereqs": { + "runtime": { + "requires": { + "PostgreSQL": "13.0.0" + } + }, + "test": { + "requires": { + "plpgsql": 0 + } + } + }, + "release_status": "stable", + "resources": { + "homepage": "https://www.citusdata.com", + "bugtracker": { + "web": "https://github.com/citusdata/citus/issues", + "mailto": "support@citusdata.com" + }, + "repository": { + "url": "git://github.com/citusdata/citus.git", + "web": "https://github.com/citusdata/citus", + "type": "git" + } + }, + "generated_by": "David E. 
Wheeler", + "meta-spec": { + "version": "1.0.0", + "url": "http://pgxn.org/meta/spec.txt" + }, + "tags": [ + "sharding", + "replication", + "parallel", + "distributed", + "horizontal", + "analytics" + ] +} diff --git a/packaging_automation/templates/pgxn/pkgvars.tmpl b/packaging_automation/templates/pgxn/pkgvars.tmpl index a4817f86..39a2fb42 100644 --- a/packaging_automation/templates/pgxn/pkgvars.tmpl +++ b/packaging_automation/templates/pgxn/pkgvars.tmpl @@ -1,3 +1,3 @@ -pkgname=citus -pkgdesc='Citus (Open-Source)' -pkglatest={{project_version}} +pkgname=citus +pkgdesc='Citus (Open-Source)' +pkglatest={{project_version}} diff --git a/packaging_automation/test_citus_package.py b/packaging_automation/test_citus_package.py index 7c7213c3..4935898e 100644 --- a/packaging_automation/test_citus_package.py +++ b/packaging_automation/test_citus_package.py @@ -1,129 +1,129 @@ -import argparse -import os -import subprocess -import shlex -import requests -from enum import Enum -import sys -from typing import List - -from .common_tool_methods import ( - get_supported_postgres_release_versions, - get_minor_version, -) - -POSTGRES_MATRIX_FILE = "postgres-matrix.yml" -POSTGRES_MATRIX_WEB_ADDRESS = "https://raw.githubusercontent.com/citusdata/packaging/all-citus/postgres-matrix.yml" - - -def run_command(command: str) -> int: - with subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.STDOUT - ) as process: - for line in iter(process.stdout.readline, b""): # b'\n'-separated lines - print(line.decode("utf-8"), end=" ") - exitcode = process.wait() - return exitcode - - -class TestPlatform(Enum): - el_7 = {"name": "el/7", "docker_image_name": "el-7"} - el_8 = {"name": "el/8", "docker_image_name": "el-8"} - centos_8 = {"name": "centos/8", "docker_image_name": "centos-8"} - centos_7 = {"name": "centos/7", "docker_image_name": "centos-7"} - ol_7 = {"name": "ol/7", "docker_image_name": "ol-7"} - ol_8 = {"name": "ol/8", "docker_image_name": "ol-8"} - debian_stretch = {"name": "debian/stretch", "docker_image_name": "debian-stretch"} - debian_buster = {"name": "debian/buster", "docker_image_name": "debian-buster"} - debian_bullseye = { - "name": "debian/bullseye", - "docker_image_name": "debian-bullseye", - } - debian_bookworm = { - "name": "debian/bookworm", - "docker_image_name": "debian-bookworm", - } - ubuntu_bionic = {"name": "ubuntu/bionic", "docker_image_name": "ubuntu-bionic"} - ubuntu_focal = {"name": "ubuntu/focal", "docker_image_name": "ubuntu-focal"} - ubuntu_jammy = {"name": "ubuntu/jammy", "docker_image_name": "ubuntu-jammy"} - ubuntu_kinetic = {"name": "ubuntu/kinetic", "docker_image_name": "ubuntu-kinetic"} - undefined = {"name": "undefined", "docker_image_name": "undefined"} - - -def get_test_platform_for_os_release(os_release: str) -> TestPlatform: - result = TestPlatform.undefined - for tp in TestPlatform: - if tp.value["name"] == os_release: - result = tp - return result - - -def get_postgres_versions_from_matrix_file(project_version: str) -> List[str]: - r = requests.get(POSTGRES_MATRIX_WEB_ADDRESS, allow_redirects=True, timeout=60) - - with open(POSTGRES_MATRIX_FILE, "wb") as writer: - writer.write(r.content) - pg_versions = get_supported_postgres_release_versions( - POSTGRES_MATRIX_FILE, project_version - ) - - return pg_versions - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--project_version", required=True) - parser.add_argument("--pg_major_version") - parser.add_argument("--os_release", choices=[t.value["name"] 
for t in TestPlatform]) - - args = parser.parse_args() - test_platform = get_test_platform_for_os_release(args.os_release) - minor_project_version = get_minor_version(args.project_version) - - platform = args.os_release - - postgres_versions = get_postgres_versions_from_matrix_file(args.project_version) - - print(f"This version of Citus supports following pg versions: {postgres_versions}") - - os.chdir("test-images") - return_codes = {} - - if args.pg_major_version: - postgres_versions = [p for p in postgres_versions if p == args.pg_major_version] - - if len(postgres_versions) == 0: - raise ValueError("At least one supported postgres version is required") - - for postgres_version in postgres_versions: - print(f"Testing package for following pg version: {postgres_version}") - docker_image_name = ( - f"test:{test_platform.value['docker_image_name']}-{postgres_version}" - ) - build_command = ( - f"docker build --pull --no-cache " - f"-t {docker_image_name} " - f"-f {test_platform.value['docker_image_name']}/Dockerfile " - f"--build-arg CITUS_VERSION={args.project_version} " - f"--build-arg PG_MAJOR={postgres_version} " - f"--build-arg CITUS_MAJOR_VERSION={minor_project_version} ." - ) - print(build_command) - return_build = run_command(build_command) - return_run = run_command( - f"docker run -e POSTGRES_VERSION={postgres_version} {docker_image_name} " - ) - return_codes[f"{docker_image_name}-build"] = return_build - return_codes[f"{docker_image_name}-run"] = return_run - - error_exists = False - print("-----------------Summary Report------------------") - for key, value in return_codes.items(): - if value > 0: - error_exists = True - print(f"{key}: {'Success' if value == 0 else f'Fail. ErrorCode: {value}'}") - summary_error = "FAILED :(" if error_exists else "SUCCESS :)" - print(f"------------------------{summary_error}------------------------") - - if error_exists: - sys.exit("Failed") +import argparse +import os +import subprocess +import shlex +import requests +from enum import Enum +import sys +from typing import List + +from .common_tool_methods import ( + get_supported_postgres_release_versions, + get_minor_version, +) + +POSTGRES_MATRIX_FILE = "postgres-matrix.yml" +POSTGRES_MATRIX_WEB_ADDRESS = "https://raw.githubusercontent.com/citusdata/packaging/all-citus/postgres-matrix.yml" + + +def run_command(command: str) -> int: + with subprocess.Popen( + shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) as process: + for line in iter(process.stdout.readline, b""): # b'\n'-separated lines + print(line.decode("utf-8"), end=" ") + exitcode = process.wait() + return exitcode + + +class TestPlatform(Enum): + el_7 = {"name": "el/7", "docker_image_name": "el-7"} + el_8 = {"name": "el/8", "docker_image_name": "el-8"} + centos_8 = {"name": "centos/8", "docker_image_name": "centos-8"} + centos_7 = {"name": "centos/7", "docker_image_name": "centos-7"} + ol_7 = {"name": "ol/7", "docker_image_name": "ol-7"} + ol_8 = {"name": "ol/8", "docker_image_name": "ol-8"} + debian_stretch = {"name": "debian/stretch", "docker_image_name": "debian-stretch"} + debian_buster = {"name": "debian/buster", "docker_image_name": "debian-buster"} + debian_bullseye = { + "name": "debian/bullseye", + "docker_image_name": "debian-bullseye", + } + debian_bookworm = { + "name": "debian/bookworm", + "docker_image_name": "debian-bookworm", + } + ubuntu_bionic = {"name": "ubuntu/bionic", "docker_image_name": "ubuntu-bionic"} + ubuntu_focal = {"name": "ubuntu/focal", "docker_image_name": "ubuntu-focal"} + 
ubuntu_jammy = {"name": "ubuntu/jammy", "docker_image_name": "ubuntu-jammy"} + ubuntu_kinetic = {"name": "ubuntu/kinetic", "docker_image_name": "ubuntu-kinetic"} + undefined = {"name": "undefined", "docker_image_name": "undefined"} + + +def get_test_platform_for_os_release(os_release: str) -> TestPlatform: + result = TestPlatform.undefined + for tp in TestPlatform: + if tp.value["name"] == os_release: + result = tp + return result + + +def get_postgres_versions_from_matrix_file(project_version: str) -> List[str]: + r = requests.get(POSTGRES_MATRIX_WEB_ADDRESS, allow_redirects=True, timeout=60) + + with open(POSTGRES_MATRIX_FILE, "wb") as writer: + writer.write(r.content) + pg_versions = get_supported_postgres_release_versions( + POSTGRES_MATRIX_FILE, project_version + ) + + return pg_versions + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_version", required=True) + parser.add_argument("--pg_major_version") + parser.add_argument("--os_release", choices=[t.value["name"] for t in TestPlatform]) + + args = parser.parse_args() + test_platform = get_test_platform_for_os_release(args.os_release) + minor_project_version = get_minor_version(args.project_version) + + platform = args.os_release + + postgres_versions = get_postgres_versions_from_matrix_file(args.project_version) + + print(f"This version of Citus supports following pg versions: {postgres_versions}") + + os.chdir("test-images") + return_codes = {} + + if args.pg_major_version: + postgres_versions = [p for p in postgres_versions if p == args.pg_major_version] + + if len(postgres_versions) == 0: + raise ValueError("At least one supported postgres version is required") + + for postgres_version in postgres_versions: + print(f"Testing package for following pg version: {postgres_version}") + docker_image_name = ( + f"test:{test_platform.value['docker_image_name']}-{postgres_version}" + ) + build_command = ( + f"docker build --pull --no-cache " + f"-t {docker_image_name} " + f"-f {test_platform.value['docker_image_name']}/Dockerfile " + f"--build-arg CITUS_VERSION={args.project_version} " + f"--build-arg PG_MAJOR={postgres_version} " + f"--build-arg CITUS_MAJOR_VERSION={minor_project_version} ." + ) + print(build_command) + return_build = run_command(build_command) + return_run = run_command( + f"docker run -e POSTGRES_VERSION={postgres_version} {docker_image_name} " + ) + return_codes[f"{docker_image_name}-build"] = return_build + return_codes[f"{docker_image_name}-run"] = return_run + + error_exists = False + print("-----------------Summary Report------------------") + for key, value in return_codes.items(): + if value > 0: + error_exists = True + print(f"{key}: {'Success' if value == 0 else f'Fail. 
ErrorCode: {value}'}") + summary_error = "FAILED :(" if error_exists else "SUCCESS :)" + print(f"------------------------{summary_error}------------------------") + + if error_exists: + sys.exit("Failed") diff --git a/packaging_automation/tests/files/citus.spec b/packaging_automation/tests/files/citus.spec index 0773292a..859e2e7c 100644 --- a/packaging_automation/tests/files/citus.spec +++ b/packaging_automation/tests/files/citus.spec @@ -1,408 +1,408 @@ -%global pgmajorversion 11 -%global pgpackageversion 11 -%global pginstdir /usr/pgsql-%{pgpackageversion} -%global sname citus -%global debug_package %{nil} - -Summary: PostgreSQL-based distributed RDBMS -Name: %{sname}%{?pkginfix}_%{pgmajorversion} -Provides: %{sname}_%{pgmajorversion} -Conflicts: %{sname}_%{pgmajorversion} -Version: 10.1.4.citus -Release: 1%{dist} -License: AGPLv3 -Group: Applications/Databases -Source0: https://github.com/citusdata/citus/archive/v10.2.4.tar.gz -URL: https://github.com/citusdata/citus -BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel -Requires: postgresql%{pgmajorversion}-server -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%description -Citus horizontally scales PostgreSQL across commodity servers -using sharding and replication. Its query engine parallelizes -incoming SQL queries across these servers to enable real-time -responses on large datasets. - -Citus extends the underlying database rather than forking it, -which gives developers and enterprises the power and familiarity -of a traditional relational database. As an extension, Citus -supports new PostgreSQL releases, allowing users to benefit from -new features while maintaining compatibility with existing -PostgreSQL tools. Note that Citus supports many (but not all) SQL -commands. 
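
Back in test_citus_package.py above, each supported Postgres major triggers one docker build and one docker run, output is streamed line by line, and the exit codes are gathered into a pass/fail summary. A trimmed-down sketch of that pattern, with placeholder image tags and Postgres majors and without the real Dockerfiles, is:

# Illustrative sketch of the build/run/summary loop in test_citus_package.py.
import shlex
import subprocess
import sys

def run_command(command: str) -> int:
    # Stream merged stdout/stderr line by line, then return the exit code.
    with subprocess.Popen(
        shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    ) as process:
        for line in iter(process.stdout.readline, b""):
            print(line.decode("utf-8"), end="")
        return process.wait()

def test_images(postgres_versions):
    return_codes = {}
    for pg in postgres_versions:
        image = f"citus-test:pg{pg}"  # placeholder tag
        return_codes[f"{image}-build"] = run_command(
            f"docker build -t {image} --build-arg PG_MAJOR={pg} ."
        )
        return_codes[f"{image}-run"] = run_command(f"docker run {image}")
    for name, code in return_codes.items():
        print(f"{name}: {'Success' if code == 0 else f'Fail. ErrorCode: {code}'}")
    if any(code != 0 for code in return_codes.values()):
        sys.exit("Failed")

if __name__ == "__main__":
    test_images(["14", "15"])  # placeholder majors; the real list comes from postgres-matrix.yml

Merging stderr into stdout through subprocess.Popen keeps the CI log ordered the way the Docker commands emit it, while wait() still yields the exit code that feeds the summary.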
- -%prep -%setup -q -n %{sname}-%{version} - -%build - -currentgccver="$(gcc -dumpversion)" -requiredgccver="4.8.2" -if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then - echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags - exit 1 -fi - -%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) -make %{?_smp_mflags} - -%install -%make_install -# Install documentation with a better name: -%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension -%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md -%{__cp} NOTICE %{buildroot}%{pginstdir}/doc/extension/NOTICE-%{sname} -# Set paths to be packaged other than LICENSE, README & CHANGELOG.md -echo %{pginstdir}/include/server/citus_*.h >> installation_files.list -echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list -echo %{pginstdir}/lib/%{sname}.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/pgoutput.so ]] && echo %{pginstdir}/lib/citus_decoders/pgoutput.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/wal2json.so ]] && echo %{pginstdir}/lib/citus_decoders/wal2json.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_pgoutput.so ]] && echo %{pginstdir}/lib/citus_pgoutput.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_wal2json.so ]] && echo %{pginstdir}/lib/citus_wal2json.so >> installation_files.list -echo %{pginstdir}/share/extension/%{sname}-*.sql >> installation_files.list -echo %{pginstdir}/share/extension/%{sname}.control >> installation_files.list -# Since files below may be non-existent in some versions, ignoring the error in case of file absence -[[ -f %{buildroot}%{pginstdir}/share/extension/citus_columnar.control ]] && echo %{pginstdir}/share/extension/citus_columnar.control >> installation_files.list -columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "columnar-*.sql"`) -if [ ${#columnar_sql_files[@]} -gt 0 ]; then - echo %{pginstdir}/share/extension/columnar-*.sql >> installation_files.list -fi - -citus_columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "citus_columnar-*.sql"`) -if [ ${#citus_columnar_sql_files[@]} -gt 0 ]; then - echo %{pginstdir}/share/extension/citus_columnar-*.sql >> installation_files.list -fi - -[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list -%ifarch ppc64 ppc64le -%else - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - echo %{pginstdir}/lib/bitcode/%{sname}/*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{sname}*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{sname}/*/*.bc >> installation_files.list - - # Columnar does not exist in Citus versions < 10.0 - # At this point, we don't have %{pginstdir}, - # so first check build directory for columnar. 
- [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_pgoutput ]] && echo %{pginstdir}/lib/bitcode/citus_pgoutput/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_wal2json ]] && echo %{pginstdir}/lib/bitcode/citus_wal2json/*.bc >> installation_files.list - %endif -%endif - -%clean -%{__rm} -rf %{buildroot} - -%files -f installation_files.list -%files -%defattr(-,root,root,-) -%doc CHANGELOG.md -%if 0%{?rhel} && 0%{?rhel} <= 6 -%doc LICENSE -%else -%license LICENSE -%endif -%doc %{pginstdir}/doc/extension/README-%{sname}.md -%doc %{pginstdir}/doc/extension/NOTICE-%{sname} - -%changelog -* Tue Feb 01 2022 - Gurkan Indibay 10.1.4.citus-1 -- Official 10.1.4 release of Citus - -* Mon Nov 29 2021 - Gurkan Indibay 10.2.3.citus-1 -- Official 10.2.3 release of Citus - -* Fri Nov 12 2021 - Gurkan Indibay 10.0.6.citus-1 -- Official 10.0.6 release of Citus - -* Mon Nov 08 2021 - Gurkan Indibay 9.5.10.citus-1 -- Official 9.5.10 release of Citus - -* Thu Nov 04 2021 - Gurkan Indibay 9.2.8.citus-1 -- Official 9.2.8 release of Citus - -* Wed Nov 03 2021 - Gurkan Indibay 9.2.7.citus-1 -- Official 9.2.7 release of Citus - -* Thu Oct 14 2021 - Gurkan Indibay 10.2.2.citus-1 -- Official 10.2.2 release of Citus - -* Fri Sep 24 2021 - Gurkan Indibay 10.2.1.citus-1 -- Official 10.2.1 release of Citus - -* Fri Sep 17 2021 - Gurkan Indibay 10.1.3.citus-1 -- Official 10.1.3 release of Citus - -* Thu Sep 16 2021 - Gurkan Indibay 10.2.0.citus-1 -- Official 10.2.0 release of Citus - -* Tue Aug 17 2021 - Gurkan Indibay 10.1.2.citus-1 -- Official 10.1.2 release of Citus - -* Tue Aug 17 2021 - Gurkan Indibay 10.0.5.citus-1 -- Official 10.0.5 release of Citus - -* Tue Aug 17 2021 - Gurkan Indibay 9.5.7.citus-1 -- Official 9.5.7 release of Citus - -* Wed Aug 11 2021 - Gurkan Indibay 9.4.6.citus-1 -- Official 9.4.6 release of Citus - -* Fri Aug 06 2021 - Gurkan Indibay 10.1.1.citus-1 -- Official 10.1.1 release of Citus - -* Fri Jul 16 2021 - Gurkan Indibay 10.1.0.citus-1 -- Official 10.1.0 release of Citus - -* Fri Jul 16 2021 - Gurkan Indibay 10.0.4.citus-1 -- Official 10.0.4 release of Citus - -* Fri Jul 09 2021 - Gurkan 9.5.6.citus-1 -- Official 9.5.6 release of Citus - -* Thu Jul 08 2021 - Gurkan 9.4.5.citus-1 -- Official 9.4.5 release of Citus - -* Thu Mar 18 2021 - Gurkan Indibay 10.0.3.citus-1 -- Official 10.0.3 release of Citus - -* Thu Mar 4 2021 - Gurkan Indibay 10.0.2.citus-1 -- Official 10.0.2 release of Citus - -* Wed Jan 27 2021 - gurkanindibay 9.5.2.citus-1 -- Official 9.5.2 release of Citus - -* Tue Jan 5 2021 - gurkanindibay 9.4.4.citus-1 -- Official 9.4.4 release of Citus - -* Wed Dec 2 2020 - Onur Tirtir 9.5.1.citus-1 -- Official 9.5.1 release of Citus - -* Tue Nov 24 2020 - Onur Tirtir 9.4.3.citus-1 -- Official 9.4.3 release of Citus - -* Wed Nov 11 2020 - Onur Tirtir 9.5.0.citus-1 -- Official 9.5.0 release of Citus - -* Thu Oct 22 2020 - Onur Tirtir 9.4.2.citus-1 -- Official 9.4.2 release of Citus - -* Wed Sep 30 2020 - Onur Tirtir 9.4.1.citus-1 -- Official 9.4.1 release of Citus - -* Tue Jul 28 2020 - Onur Tirtir 9.4.0.citus-1 -- Official 9.4.0 
release of Citus - -* Mon Jul 27 2020 - Onur Tirtir 9.3.5.citus-1 -- Official 9.3.5 release of Citus - -* Wed Jul 22 2020 - Onur Tirtir 9.3.4.citus-1 -- Official 9.3.4 release of Citus - -* Mon Jul 13 2020 - Onur Tirtir 9.3.3.citus-1 -- Official 9.3.3 release of Citus - -* Thu May 7 2020 - Onur Tirtir 9.3.0.citus-1 -- Update to Citus 9.3.0 - -* Tue Mar 31 2020 - Onur Tirtir 9.2.4.citus-1 -- Update to Citus 9.2.4 - -* Thu Mar 26 2020 - Onur Tirtir 9.2.3.citus-1 -- Update to Citus 9.2.3 - -* Fri Mar 6 2020 - Onur Tirtir 9.0.2.citus-1 -- Update to Citus 9.0.2 - -* Fri Mar 6 2020 - Onur Tirtir 9.2.2.citus-1 -- Update to Citus 9.2.2 - -* Fri Feb 14 2020 - Onur Tirtir 9.2.1.citus-1 -- Update to Citus 9.2.1 - -* Mon Feb 10 2020 - Onur Tirtir 9.2.0.citus-1 -- Update to Citus 9.2.0 - -* Wed Dec 18 2019 - Onur Tirtir 9.1.1.citus-1 -- Update to Citus 9.1.1 - -* Thu Nov 28 2019 - Onur Tirtir 9.1.0.citus-1 -- Update to Citus 9.1.0 - -* Wed Oct 30 2019 - Hanefi Onaldi 9.0.1.citus-1 -- Update to Citus 9.0.1 - -* Thu Oct 10 2019 - Hanefi Onaldi 9.0.0.citus-1 -- Update to Citus 9.0.0 - -* Fri Aug 9 2019 - Hanefi Onaldi 8.3.2.citus-1 -- Update to Citus 8.3.2 - -* Mon Jul 29 2019 - Hanefi Onaldi 8.3.1.citus-1 -- Update to Citus 8.3.1 - -* Wed Jul 10 2019 - Burak Velioglu 8.3.0.citus-1 -- Update to Citus 8.3.0 - -* Wed Jun 12 2019 - Burak Velioglu 8.2.2.citus-1 -- Update to Citus 8.2.2 - -* Wed Apr 3 2019 - Burak Velioglu 8.2.1.citus-1 -- Update to Citus 8.2.1 - -* Wed Apr 3 2019 - Burak Velioglu 8.1.2.citus-1 -- Update to Citus 8.1.2 - -* Thu Mar 28 2019 - Burak Velioglu 8.2.0.citus-1 -- Update to Citus 8.2.0 - -* Wed Jan 9 2019 - Burak Velioglu 8.0.3.citus-1 -- Update to Citus 8.0.3 - -* Mon Jan 7 2019 - Burak Velioglu 8.1.1.citus-1 -- Update to Citus 8.1.1 - -* Tue Dec 18 2018 - Burak Velioglu 8.1.0.citus-1 -- Update to Citus 8.1.0 - -* Thu Dec 13 2018 - Burak Velioglu 8.0.2.citus-1 -- Update to Citus 8.0.2 - -* Wed Dec 12 2018 - Burak Velioglu 7.5.4.citus-1 -- Update to Citus 7.5.4 - -* Wed Nov 28 2018 - Burak Velioglu 8.0.1.citus-1 -- Update to Citus 8.0.1 - -* Wed Nov 28 2018 - Burak Velioglu 7.5.3.citus-1 -- Update to Citus 7.5.3 - -* Wed Nov 14 2018 - Burak Velioglu 7.5.2.citus-1 -- Update to Citus 7.5.2 - -* Fri Nov 02 2018 - Burak Velioglu 8.0.0.citus-1 -- Update to Citus 8.0.0 - -* Wed Aug 29 2018 - Burak Velioglu 7.5.1.citus-1 -- Update to Citus 7.5.1 - -* Fri Jul 27 2018 - Mehmet Furkan Sahin 7.4.2.citus-1 -- Update to Citus 7.4.2 - -* Wed Jul 25 2018 - Mehmet Furkan Sahin 7.5.0.citus-1 -- Update to Citus 7.5.0 - -* Wed Jun 20 2018 - Burak Velioglu 7.4.1.citus-1 -- Update to Citus 7.4.1 - -* Thu May 17 2018 - Burak Velioglu 7.2.2.citus-1 -- Update to Citus 7.2.2 - -* Tue May 15 2018 - Burak Velioglu 7.4.0.citus-1 -- Update to Citus 7.4.0 - -* Thu Mar 15 2018 - Burak Velioglu 7.3.0.citus-1 -- Update to Citus 7.3.0 - -* Tue Feb 6 2018 - Burak Velioglu 7.2.1.citus-1 -- Update to Citus 7.2.1 - -* Tue Jan 16 2018 - Burak Velioglu 7.2.0.citus-1 -- Update to Citus 7.2.0 - -* Thu Jan 11 2018 - Burak Velioglu 6.2.5.citus-1 -- Update to Citus 6.2.5 - -* Fri Jan 05 2018 - Burak Velioglu 7.1.2.citus-1 -- Update to Citus 7.1.2 - -* Tue Dec 05 2017 - Burak Velioglu 7.1.1.citus-1 -- Update to Citus 7.1.1 - -* Wed Nov 15 2017 - Burak Velioglu 7.1.0.citus-1 -- Update to Citus 7.1.0 - -* Mon Oct 16 2017 - Burak Yucesoy 7.0.3.citus-1 -- Update to Citus 7.0.3 - -* Thu Sep 28 2017 - Burak Yucesoy 7.0.2.citus-1 -- Update to Citus 7.0.2 - -* Thu Sep 28 2017 - Burak Yucesoy 6.2.4.citus-1 -- Update to Citus 6.2.4 - -* 
Thu Sep 28 2017 - Burak Yucesoy 6.1.3.citus-1 -- Update to Citus 6.1.3 - -* Tue Sep 12 2017 - Burak Yucesoy 7.0.1.citus-1 -- Update to Citus 7.0.1 - -* Tue Aug 29 2017 - Burak Yucesoy 7.0.0.citus-1 -- Update to Citus 7.0.0 - -* Thu Jul 13 2017 - Burak Yucesoy 6.2.3.citus-1 -- Update to Citus 6.2.3 - -* Wed Jun 7 2017 - Burak Velioglu 6.2.2.citus-1 -- Update to Citus 6.2.2 - -* Wed Jun 7 2017 - Jason Petersen 6.1.2.citus-1 -- Update to Citus 6.1.2 - -* Wed May 24 2017 - Jason Petersen 6.2.1.citus-1 -- Update to Citus 6.2.1 - -* Tue May 16 2017 - Burak Yucesoy 6.2.0.citus-1 -- Update to Citus 6.2.0 - -* Fri May 5 2017 - Metin Doslu 6.1.1.citus-1 -- Update to Citus 6.1.1 - -* Thu Feb 9 2017 - Burak Yucesoy 6.1.0.citus-1 -- Update to Citus 6.1.0 - -* Wed Feb 8 2017 - Jason Petersen 6.0.1.citus-2 -- Transitional package to guide users to new package name - -* Wed Nov 30 2016 - Burak Yucesoy 6.0.1.citus-1 -- Update to Citus 6.0.1 - -* Tue Nov 8 2016 - Jason Petersen 6.0.0.citus-1 -- Update to Citus 6.0.0 - -* Tue Nov 8 2016 - Jason Petersen 5.2.2.citus-1 -- Update to Citus 5.2.2 - -* Tue Sep 6 2016 - Jason Petersen 5.2.1.citus-1 -- Update to Citus 5.2.1 - -* Wed Aug 17 2016 - Jason Petersen 5.2.0.citus-1 -- Update to Citus 5.2.0 - -* Mon Aug 1 2016 - Jason Petersen 5.2.0-0.1.rc.1 -- Release candidate for 5.2 - -* Fri Jun 17 2016 - Jason Petersen 5.1.1-1 -- Update to Citus 5.1.1 - -* Tue May 17 2016 - Jason Petersen 5.1.0-1 -- Update to Citus 5.1.0 - -* Mon May 16 2016 - Jason Petersen 5.1.0-0.2.rc.2 -- Fix EXPLAIN output when FORMAT JSON in use - -* Wed May 4 2016 - Jason Petersen 5.1.0-0.1.rc.1 -- Release candidate for 5.1 - -* Fri Apr 15 2016 - Jason Petersen 5.0.1-1 -- Fixes issues on 32-bit systems - -* Fri Mar 25 2016 - Devrim Gündüz 5.0.0-1 -- Initial RPM packaging for PostgreSQL RPM Repository, - based on the spec file of Jason Petersen @ Citus. +%global pgmajorversion 11 +%global pgpackageversion 11 +%global pginstdir /usr/pgsql-%{pgpackageversion} +%global sname citus +%global debug_package %{nil} + +Summary: PostgreSQL-based distributed RDBMS +Name: %{sname}%{?pkginfix}_%{pgmajorversion} +Provides: %{sname}_%{pgmajorversion} +Conflicts: %{sname}_%{pgmajorversion} +Version: 10.1.4.citus +Release: 1%{dist} +License: AGPLv3 +Group: Applications/Databases +Source0: https://github.com/citusdata/citus/archive/v10.2.4.tar.gz +URL: https://github.com/citusdata/citus +BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel +Requires: postgresql%{pgmajorversion}-server +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%description +Citus horizontally scales PostgreSQL across commodity servers +using sharding and replication. Its query engine parallelizes +incoming SQL queries across these servers to enable real-time +responses on large datasets. + +Citus extends the underlying database rather than forking it, +which gives developers and enterprises the power and familiarity +of a traditional relational database. As an extension, Citus +supports new PostgreSQL releases, allowing users to benefit from +new features while maintaining compatibility with existing +PostgreSQL tools. Note that Citus supports many (but not all) SQL +commands. 
+ +%prep +%setup -q -n %{sname}-%{version} + +%build + +currentgccver="$(gcc -dumpversion)" +requiredgccver="4.8.2" +if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then + echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags + exit 1 +fi + +%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) +make %{?_smp_mflags} + +%install +%make_install +# Install documentation with a better name: +%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension +%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md +%{__cp} NOTICE %{buildroot}%{pginstdir}/doc/extension/NOTICE-%{sname} +# Set paths to be packaged other than LICENSE, README & CHANGELOG.md +echo %{pginstdir}/include/server/citus_*.h >> installation_files.list +echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list +echo %{pginstdir}/lib/%{sname}.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/pgoutput.so ]] && echo %{pginstdir}/lib/citus_decoders/pgoutput.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/wal2json.so ]] && echo %{pginstdir}/lib/citus_decoders/wal2json.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_pgoutput.so ]] && echo %{pginstdir}/lib/citus_pgoutput.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_wal2json.so ]] && echo %{pginstdir}/lib/citus_wal2json.so >> installation_files.list +echo %{pginstdir}/share/extension/%{sname}-*.sql >> installation_files.list +echo %{pginstdir}/share/extension/%{sname}.control >> installation_files.list +# Since files below may be non-existent in some versions, ignoring the error in case of file absence +[[ -f %{buildroot}%{pginstdir}/share/extension/citus_columnar.control ]] && echo %{pginstdir}/share/extension/citus_columnar.control >> installation_files.list +columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "columnar-*.sql"`) +if [ ${#columnar_sql_files[@]} -gt 0 ]; then + echo %{pginstdir}/share/extension/columnar-*.sql >> installation_files.list +fi + +citus_columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "citus_columnar-*.sql"`) +if [ ${#citus_columnar_sql_files[@]} -gt 0 ]; then + echo %{pginstdir}/share/extension/citus_columnar-*.sql >> installation_files.list +fi + +[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list +%ifarch ppc64 ppc64le +%else + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + echo %{pginstdir}/lib/bitcode/%{sname}/*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{sname}*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{sname}/*/*.bc >> installation_files.list + + # Columnar does not exist in Citus versions < 10.0 + # At this point, we don't have %{pginstdir}, + # so first check build directory for columnar. 
+ [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_pgoutput ]] && echo %{pginstdir}/lib/bitcode/citus_pgoutput/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_wal2json ]] && echo %{pginstdir}/lib/bitcode/citus_wal2json/*.bc >> installation_files.list + %endif +%endif + +%clean +%{__rm} -rf %{buildroot} + +%files -f installation_files.list +%files +%defattr(-,root,root,-) +%doc CHANGELOG.md +%if 0%{?rhel} && 0%{?rhel} <= 6 +%doc LICENSE +%else +%license LICENSE +%endif +%doc %{pginstdir}/doc/extension/README-%{sname}.md +%doc %{pginstdir}/doc/extension/NOTICE-%{sname} + +%changelog +* Tue Feb 01 2022 - Gurkan Indibay 10.1.4.citus-1 +- Official 10.1.4 release of Citus + +* Mon Nov 29 2021 - Gurkan Indibay 10.2.3.citus-1 +- Official 10.2.3 release of Citus + +* Fri Nov 12 2021 - Gurkan Indibay 10.0.6.citus-1 +- Official 10.0.6 release of Citus + +* Mon Nov 08 2021 - Gurkan Indibay 9.5.10.citus-1 +- Official 9.5.10 release of Citus + +* Thu Nov 04 2021 - Gurkan Indibay 9.2.8.citus-1 +- Official 9.2.8 release of Citus + +* Wed Nov 03 2021 - Gurkan Indibay 9.2.7.citus-1 +- Official 9.2.7 release of Citus + +* Thu Oct 14 2021 - Gurkan Indibay 10.2.2.citus-1 +- Official 10.2.2 release of Citus + +* Fri Sep 24 2021 - Gurkan Indibay 10.2.1.citus-1 +- Official 10.2.1 release of Citus + +* Fri Sep 17 2021 - Gurkan Indibay 10.1.3.citus-1 +- Official 10.1.3 release of Citus + +* Thu Sep 16 2021 - Gurkan Indibay 10.2.0.citus-1 +- Official 10.2.0 release of Citus + +* Tue Aug 17 2021 - Gurkan Indibay 10.1.2.citus-1 +- Official 10.1.2 release of Citus + +* Tue Aug 17 2021 - Gurkan Indibay 10.0.5.citus-1 +- Official 10.0.5 release of Citus + +* Tue Aug 17 2021 - Gurkan Indibay 9.5.7.citus-1 +- Official 9.5.7 release of Citus + +* Wed Aug 11 2021 - Gurkan Indibay 9.4.6.citus-1 +- Official 9.4.6 release of Citus + +* Fri Aug 06 2021 - Gurkan Indibay 10.1.1.citus-1 +- Official 10.1.1 release of Citus + +* Fri Jul 16 2021 - Gurkan Indibay 10.1.0.citus-1 +- Official 10.1.0 release of Citus + +* Fri Jul 16 2021 - Gurkan Indibay 10.0.4.citus-1 +- Official 10.0.4 release of Citus + +* Fri Jul 09 2021 - Gurkan 9.5.6.citus-1 +- Official 9.5.6 release of Citus + +* Thu Jul 08 2021 - Gurkan 9.4.5.citus-1 +- Official 9.4.5 release of Citus + +* Thu Mar 18 2021 - Gurkan Indibay 10.0.3.citus-1 +- Official 10.0.3 release of Citus + +* Thu Mar 4 2021 - Gurkan Indibay 10.0.2.citus-1 +- Official 10.0.2 release of Citus + +* Wed Jan 27 2021 - gurkanindibay 9.5.2.citus-1 +- Official 9.5.2 release of Citus + +* Tue Jan 5 2021 - gurkanindibay 9.4.4.citus-1 +- Official 9.4.4 release of Citus + +* Wed Dec 2 2020 - Onur Tirtir 9.5.1.citus-1 +- Official 9.5.1 release of Citus + +* Tue Nov 24 2020 - Onur Tirtir 9.4.3.citus-1 +- Official 9.4.3 release of Citus + +* Wed Nov 11 2020 - Onur Tirtir 9.5.0.citus-1 +- Official 9.5.0 release of Citus + +* Thu Oct 22 2020 - Onur Tirtir 9.4.2.citus-1 +- Official 9.4.2 release of Citus + +* Wed Sep 30 2020 - Onur Tirtir 9.4.1.citus-1 +- Official 9.4.1 release of Citus + +* Tue Jul 28 2020 - Onur Tirtir 9.4.0.citus-1 +- Official 9.4.0 
release of Citus + +* Mon Jul 27 2020 - Onur Tirtir 9.3.5.citus-1 +- Official 9.3.5 release of Citus + +* Wed Jul 22 2020 - Onur Tirtir 9.3.4.citus-1 +- Official 9.3.4 release of Citus + +* Mon Jul 13 2020 - Onur Tirtir 9.3.3.citus-1 +- Official 9.3.3 release of Citus + +* Thu May 7 2020 - Onur Tirtir 9.3.0.citus-1 +- Update to Citus 9.3.0 + +* Tue Mar 31 2020 - Onur Tirtir 9.2.4.citus-1 +- Update to Citus 9.2.4 + +* Thu Mar 26 2020 - Onur Tirtir 9.2.3.citus-1 +- Update to Citus 9.2.3 + +* Fri Mar 6 2020 - Onur Tirtir 9.0.2.citus-1 +- Update to Citus 9.0.2 + +* Fri Mar 6 2020 - Onur Tirtir 9.2.2.citus-1 +- Update to Citus 9.2.2 + +* Fri Feb 14 2020 - Onur Tirtir 9.2.1.citus-1 +- Update to Citus 9.2.1 + +* Mon Feb 10 2020 - Onur Tirtir 9.2.0.citus-1 +- Update to Citus 9.2.0 + +* Wed Dec 18 2019 - Onur Tirtir 9.1.1.citus-1 +- Update to Citus 9.1.1 + +* Thu Nov 28 2019 - Onur Tirtir 9.1.0.citus-1 +- Update to Citus 9.1.0 + +* Wed Oct 30 2019 - Hanefi Onaldi 9.0.1.citus-1 +- Update to Citus 9.0.1 + +* Thu Oct 10 2019 - Hanefi Onaldi 9.0.0.citus-1 +- Update to Citus 9.0.0 + +* Fri Aug 9 2019 - Hanefi Onaldi 8.3.2.citus-1 +- Update to Citus 8.3.2 + +* Mon Jul 29 2019 - Hanefi Onaldi 8.3.1.citus-1 +- Update to Citus 8.3.1 + +* Wed Jul 10 2019 - Burak Velioglu 8.3.0.citus-1 +- Update to Citus 8.3.0 + +* Wed Jun 12 2019 - Burak Velioglu 8.2.2.citus-1 +- Update to Citus 8.2.2 + +* Wed Apr 3 2019 - Burak Velioglu 8.2.1.citus-1 +- Update to Citus 8.2.1 + +* Wed Apr 3 2019 - Burak Velioglu 8.1.2.citus-1 +- Update to Citus 8.1.2 + +* Thu Mar 28 2019 - Burak Velioglu 8.2.0.citus-1 +- Update to Citus 8.2.0 + +* Wed Jan 9 2019 - Burak Velioglu 8.0.3.citus-1 +- Update to Citus 8.0.3 + +* Mon Jan 7 2019 - Burak Velioglu 8.1.1.citus-1 +- Update to Citus 8.1.1 + +* Tue Dec 18 2018 - Burak Velioglu 8.1.0.citus-1 +- Update to Citus 8.1.0 + +* Thu Dec 13 2018 - Burak Velioglu 8.0.2.citus-1 +- Update to Citus 8.0.2 + +* Wed Dec 12 2018 - Burak Velioglu 7.5.4.citus-1 +- Update to Citus 7.5.4 + +* Wed Nov 28 2018 - Burak Velioglu 8.0.1.citus-1 +- Update to Citus 8.0.1 + +* Wed Nov 28 2018 - Burak Velioglu 7.5.3.citus-1 +- Update to Citus 7.5.3 + +* Wed Nov 14 2018 - Burak Velioglu 7.5.2.citus-1 +- Update to Citus 7.5.2 + +* Fri Nov 02 2018 - Burak Velioglu 8.0.0.citus-1 +- Update to Citus 8.0.0 + +* Wed Aug 29 2018 - Burak Velioglu 7.5.1.citus-1 +- Update to Citus 7.5.1 + +* Fri Jul 27 2018 - Mehmet Furkan Sahin 7.4.2.citus-1 +- Update to Citus 7.4.2 + +* Wed Jul 25 2018 - Mehmet Furkan Sahin 7.5.0.citus-1 +- Update to Citus 7.5.0 + +* Wed Jun 20 2018 - Burak Velioglu 7.4.1.citus-1 +- Update to Citus 7.4.1 + +* Thu May 17 2018 - Burak Velioglu 7.2.2.citus-1 +- Update to Citus 7.2.2 + +* Tue May 15 2018 - Burak Velioglu 7.4.0.citus-1 +- Update to Citus 7.4.0 + +* Thu Mar 15 2018 - Burak Velioglu 7.3.0.citus-1 +- Update to Citus 7.3.0 + +* Tue Feb 6 2018 - Burak Velioglu 7.2.1.citus-1 +- Update to Citus 7.2.1 + +* Tue Jan 16 2018 - Burak Velioglu 7.2.0.citus-1 +- Update to Citus 7.2.0 + +* Thu Jan 11 2018 - Burak Velioglu 6.2.5.citus-1 +- Update to Citus 6.2.5 + +* Fri Jan 05 2018 - Burak Velioglu 7.1.2.citus-1 +- Update to Citus 7.1.2 + +* Tue Dec 05 2017 - Burak Velioglu 7.1.1.citus-1 +- Update to Citus 7.1.1 + +* Wed Nov 15 2017 - Burak Velioglu 7.1.0.citus-1 +- Update to Citus 7.1.0 + +* Mon Oct 16 2017 - Burak Yucesoy 7.0.3.citus-1 +- Update to Citus 7.0.3 + +* Thu Sep 28 2017 - Burak Yucesoy 7.0.2.citus-1 +- Update to Citus 7.0.2 + +* Thu Sep 28 2017 - Burak Yucesoy 6.2.4.citus-1 +- Update to Citus 6.2.4 + +* 
Thu Sep 28 2017 - Burak Yucesoy 6.1.3.citus-1 +- Update to Citus 6.1.3 + +* Tue Sep 12 2017 - Burak Yucesoy 7.0.1.citus-1 +- Update to Citus 7.0.1 + +* Tue Aug 29 2017 - Burak Yucesoy 7.0.0.citus-1 +- Update to Citus 7.0.0 + +* Thu Jul 13 2017 - Burak Yucesoy 6.2.3.citus-1 +- Update to Citus 6.2.3 + +* Wed Jun 7 2017 - Burak Velioglu 6.2.2.citus-1 +- Update to Citus 6.2.2 + +* Wed Jun 7 2017 - Jason Petersen 6.1.2.citus-1 +- Update to Citus 6.1.2 + +* Wed May 24 2017 - Jason Petersen 6.2.1.citus-1 +- Update to Citus 6.2.1 + +* Tue May 16 2017 - Burak Yucesoy 6.2.0.citus-1 +- Update to Citus 6.2.0 + +* Fri May 5 2017 - Metin Doslu 6.1.1.citus-1 +- Update to Citus 6.1.1 + +* Thu Feb 9 2017 - Burak Yucesoy 6.1.0.citus-1 +- Update to Citus 6.1.0 + +* Wed Feb 8 2017 - Jason Petersen 6.0.1.citus-2 +- Transitional package to guide users to new package name + +* Wed Nov 30 2016 - Burak Yucesoy 6.0.1.citus-1 +- Update to Citus 6.0.1 + +* Tue Nov 8 2016 - Jason Petersen 6.0.0.citus-1 +- Update to Citus 6.0.0 + +* Tue Nov 8 2016 - Jason Petersen 5.2.2.citus-1 +- Update to Citus 5.2.2 + +* Tue Sep 6 2016 - Jason Petersen 5.2.1.citus-1 +- Update to Citus 5.2.1 + +* Wed Aug 17 2016 - Jason Petersen 5.2.0.citus-1 +- Update to Citus 5.2.0 + +* Mon Aug 1 2016 - Jason Petersen 5.2.0-0.1.rc.1 +- Release candidate for 5.2 + +* Fri Jun 17 2016 - Jason Petersen 5.1.1-1 +- Update to Citus 5.1.1 + +* Tue May 17 2016 - Jason Petersen 5.1.0-1 +- Update to Citus 5.1.0 + +* Mon May 16 2016 - Jason Petersen 5.1.0-0.2.rc.2 +- Fix EXPLAIN output when FORMAT JSON in use + +* Wed May 4 2016 - Jason Petersen 5.1.0-0.1.rc.1 +- Release candidate for 5.1 + +* Fri Apr 15 2016 - Jason Petersen 5.0.1-1 +- Fixes issues on 32-bit systems + +* Fri Mar 25 2016 - Devrim Gündüz 5.0.0-1 +- Initial RPM packaging for PostgreSQL RPM Repository, + based on the spec file of Jason Petersen @ Citus. diff --git a/packaging_automation/tests/files/citus_include_10_2_4.spec b/packaging_automation/tests/files/citus_include_10_2_4.spec index ec843d5b..8a9c3873 100644 --- a/packaging_automation/tests/files/citus_include_10_2_4.spec +++ b/packaging_automation/tests/files/citus_include_10_2_4.spec @@ -1,411 +1,411 @@ -%global pgmajorversion 11 -%global pgpackageversion 11 -%global pginstdir /usr/pgsql-%{pgpackageversion} -%global sname citus -%global debug_package %{nil} - -Summary: PostgreSQL-based distributed RDBMS -Name: %{sname}%{?pkginfix}_%{pgmajorversion} -Provides: %{sname}_%{pgmajorversion} -Conflicts: %{sname}_%{pgmajorversion} -Version: 10.2.4.citus -Release: 1%{dist} -License: AGPLv3 -Group: Applications/Databases -Source0: https://github.com/citusdata/citus/archive/v10.2.4.tar.gz -URL: https://github.com/citusdata/citus -BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel -Requires: postgresql%{pgmajorversion}-server -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%description -Citus horizontally scales PostgreSQL across commodity servers -using sharding and replication. Its query engine parallelizes -incoming SQL queries across these servers to enable real-time -responses on large datasets. - -Citus extends the underlying database rather than forking it, -which gives developers and enterprises the power and familiarity -of a traditional relational database. As an extension, Citus -supports new PostgreSQL releases, allowing users to benefit from -new features while maintaining compatibility with existing -PostgreSQL tools. Note that Citus supports many (but not all) SQL -commands. 
- -%prep -%setup -q -n %{sname}-%{version} - -%build - -currentgccver="$(gcc -dumpversion)" -requiredgccver="4.8.2" -if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then - echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags - exit 1 -fi - -%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) -make %{?_smp_mflags} - -%install -%make_install -# Install documentation with a better name: -%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension -%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md -%{__cp} NOTICE %{buildroot}%{pginstdir}/doc/extension/NOTICE-%{sname} -# Set paths to be packaged other than LICENSE, README & CHANGELOG.md -echo %{pginstdir}/include/server/citus_*.h >> installation_files.list -echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list -echo %{pginstdir}/lib/%{sname}.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/pgoutput.so ]] && echo %{pginstdir}/lib/citus_decoders/pgoutput.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/wal2json.so ]] && echo %{pginstdir}/lib/citus_decoders/wal2json.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_pgoutput.so ]] && echo %{pginstdir}/lib/citus_pgoutput.so >> installation_files.list -[[ -f %{buildroot}%{pginstdir}/lib/citus_wal2json.so ]] && echo %{pginstdir}/lib/citus_wal2json.so >> installation_files.list -echo %{pginstdir}/share/extension/%{sname}-*.sql >> installation_files.list -echo %{pginstdir}/share/extension/%{sname}.control >> installation_files.list -# Since files below may be non-existent in some versions, ignoring the error in case of file absence -[[ -f %{buildroot}%{pginstdir}/share/extension/citus_columnar.control ]] && echo %{pginstdir}/share/extension/citus_columnar.control >> installation_files.list -columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "columnar-*.sql"`) -if [ ${#columnar_sql_files[@]} -gt 0 ]; then - echo %{pginstdir}/share/extension/columnar-*.sql >> installation_files.list -fi - -citus_columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "citus_columnar-*.sql"`) -if [ ${#citus_columnar_sql_files[@]} -gt 0 ]; then - echo %{pginstdir}/share/extension/citus_columnar-*.sql >> installation_files.list -fi - -[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list -%ifarch ppc64 ppc64le -%else - %if 0%{?rhel} && 0%{?rhel} <= 6 - %else - echo %{pginstdir}/lib/bitcode/%{sname}/*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{sname}*.bc >> installation_files.list - echo %{pginstdir}/lib/bitcode/%{sname}/*/*.bc >> installation_files.list - - # Columnar does not exist in Citus versions < 10.0 - # At this point, we don't have %{pginstdir}, - # so first check build directory for columnar. 
- [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_pgoutput ]] && echo %{pginstdir}/lib/bitcode/citus_pgoutput/*.bc >> installation_files.list - [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_wal2json ]] && echo %{pginstdir}/lib/bitcode/citus_wal2json/*.bc >> installation_files.list - %endif -%endif - -%clean -%{__rm} -rf %{buildroot} - -%files -f installation_files.list -%files -%defattr(-,root,root,-) -%doc CHANGELOG.md -%if 0%{?rhel} && 0%{?rhel} <= 6 -%doc LICENSE -%else -%license LICENSE -%endif -%doc %{pginstdir}/doc/extension/README-%{sname}.md -%doc %{pginstdir}/doc/extension/NOTICE-%{sname} - -%changelog -* Tue Feb 01 2022 - Gurkan Indibay 10.2.4.citus-1 -- Official 10.2.4 release of Citus - -* Tue Feb 01 2022 - Gurkan Indibay 10.1.4.citus-1 -- Official 10.1.4 release of Citus - -* Mon Nov 29 2021 - Gurkan Indibay 10.2.3.citus-1 -- Official 10.2.3 release of Citus - -* Fri Nov 12 2021 - Gurkan Indibay 10.0.6.citus-1 -- Official 10.0.6 release of Citus - -* Mon Nov 08 2021 - Gurkan Indibay 9.5.10.citus-1 -- Official 9.5.10 release of Citus - -* Thu Nov 04 2021 - Gurkan Indibay 9.2.8.citus-1 -- Official 9.2.8 release of Citus - -* Wed Nov 03 2021 - Gurkan Indibay 9.2.7.citus-1 -- Official 9.2.7 release of Citus - -* Thu Oct 14 2021 - Gurkan Indibay 10.2.2.citus-1 -- Official 10.2.2 release of Citus - -* Fri Sep 24 2021 - Gurkan Indibay 10.2.1.citus-1 -- Official 10.2.1 release of Citus - -* Fri Sep 17 2021 - Gurkan Indibay 10.1.3.citus-1 -- Official 10.1.3 release of Citus - -* Thu Sep 16 2021 - Gurkan Indibay 10.2.0.citus-1 -- Official 10.2.0 release of Citus - -* Tue Aug 17 2021 - Gurkan Indibay 10.1.2.citus-1 -- Official 10.1.2 release of Citus - -* Tue Aug 17 2021 - Gurkan Indibay 10.0.5.citus-1 -- Official 10.0.5 release of Citus - -* Tue Aug 17 2021 - Gurkan Indibay 9.5.7.citus-1 -- Official 9.5.7 release of Citus - -* Wed Aug 11 2021 - Gurkan Indibay 9.4.6.citus-1 -- Official 9.4.6 release of Citus - -* Fri Aug 06 2021 - Gurkan Indibay 10.1.1.citus-1 -- Official 10.1.1 release of Citus - -* Fri Jul 16 2021 - Gurkan Indibay 10.1.0.citus-1 -- Official 10.1.0 release of Citus - -* Fri Jul 16 2021 - Gurkan Indibay 10.0.4.citus-1 -- Official 10.0.4 release of Citus - -* Fri Jul 09 2021 - Gurkan 9.5.6.citus-1 -- Official 9.5.6 release of Citus - -* Thu Jul 08 2021 - Gurkan 9.4.5.citus-1 -- Official 9.4.5 release of Citus - -* Thu Mar 18 2021 - Gurkan Indibay 10.0.3.citus-1 -- Official 10.0.3 release of Citus - -* Thu Mar 4 2021 - Gurkan Indibay 10.0.2.citus-1 -- Official 10.0.2 release of Citus - -* Wed Jan 27 2021 - gurkanindibay 9.5.2.citus-1 -- Official 9.5.2 release of Citus - -* Tue Jan 5 2021 - gurkanindibay 9.4.4.citus-1 -- Official 9.4.4 release of Citus - -* Wed Dec 2 2020 - Onur Tirtir 9.5.1.citus-1 -- Official 9.5.1 release of Citus - -* Tue Nov 24 2020 - Onur Tirtir 9.4.3.citus-1 -- Official 9.4.3 release of Citus - -* Wed Nov 11 2020 - Onur Tirtir 9.5.0.citus-1 -- Official 9.5.0 release of Citus - -* Thu Oct 22 2020 - Onur Tirtir 9.4.2.citus-1 -- Official 9.4.2 release of Citus - -* Wed Sep 30 2020 - Onur Tirtir 9.4.1.citus-1 -- Official 
9.4.1 release of Citus - -* Tue Jul 28 2020 - Onur Tirtir 9.4.0.citus-1 -- Official 9.4.0 release of Citus - -* Mon Jul 27 2020 - Onur Tirtir 9.3.5.citus-1 -- Official 9.3.5 release of Citus - -* Wed Jul 22 2020 - Onur Tirtir 9.3.4.citus-1 -- Official 9.3.4 release of Citus - -* Mon Jul 13 2020 - Onur Tirtir 9.3.3.citus-1 -- Official 9.3.3 release of Citus - -* Thu May 7 2020 - Onur Tirtir 9.3.0.citus-1 -- Update to Citus 9.3.0 - -* Tue Mar 31 2020 - Onur Tirtir 9.2.4.citus-1 -- Update to Citus 9.2.4 - -* Thu Mar 26 2020 - Onur Tirtir 9.2.3.citus-1 -- Update to Citus 9.2.3 - -* Fri Mar 6 2020 - Onur Tirtir 9.0.2.citus-1 -- Update to Citus 9.0.2 - -* Fri Mar 6 2020 - Onur Tirtir 9.2.2.citus-1 -- Update to Citus 9.2.2 - -* Fri Feb 14 2020 - Onur Tirtir 9.2.1.citus-1 -- Update to Citus 9.2.1 - -* Mon Feb 10 2020 - Onur Tirtir 9.2.0.citus-1 -- Update to Citus 9.2.0 - -* Wed Dec 18 2019 - Onur Tirtir 9.1.1.citus-1 -- Update to Citus 9.1.1 - -* Thu Nov 28 2019 - Onur Tirtir 9.1.0.citus-1 -- Update to Citus 9.1.0 - -* Wed Oct 30 2019 - Hanefi Onaldi 9.0.1.citus-1 -- Update to Citus 9.0.1 - -* Thu Oct 10 2019 - Hanefi Onaldi 9.0.0.citus-1 -- Update to Citus 9.0.0 - -* Fri Aug 9 2019 - Hanefi Onaldi 8.3.2.citus-1 -- Update to Citus 8.3.2 - -* Mon Jul 29 2019 - Hanefi Onaldi 8.3.1.citus-1 -- Update to Citus 8.3.1 - -* Wed Jul 10 2019 - Burak Velioglu 8.3.0.citus-1 -- Update to Citus 8.3.0 - -* Wed Jun 12 2019 - Burak Velioglu 8.2.2.citus-1 -- Update to Citus 8.2.2 - -* Wed Apr 3 2019 - Burak Velioglu 8.2.1.citus-1 -- Update to Citus 8.2.1 - -* Wed Apr 3 2019 - Burak Velioglu 8.1.2.citus-1 -- Update to Citus 8.1.2 - -* Thu Mar 28 2019 - Burak Velioglu 8.2.0.citus-1 -- Update to Citus 8.2.0 - -* Wed Jan 9 2019 - Burak Velioglu 8.0.3.citus-1 -- Update to Citus 8.0.3 - -* Mon Jan 7 2019 - Burak Velioglu 8.1.1.citus-1 -- Update to Citus 8.1.1 - -* Tue Dec 18 2018 - Burak Velioglu 8.1.0.citus-1 -- Update to Citus 8.1.0 - -* Thu Dec 13 2018 - Burak Velioglu 8.0.2.citus-1 -- Update to Citus 8.0.2 - -* Wed Dec 12 2018 - Burak Velioglu 7.5.4.citus-1 -- Update to Citus 7.5.4 - -* Wed Nov 28 2018 - Burak Velioglu 8.0.1.citus-1 -- Update to Citus 8.0.1 - -* Wed Nov 28 2018 - Burak Velioglu 7.5.3.citus-1 -- Update to Citus 7.5.3 - -* Wed Nov 14 2018 - Burak Velioglu 7.5.2.citus-1 -- Update to Citus 7.5.2 - -* Fri Nov 02 2018 - Burak Velioglu 8.0.0.citus-1 -- Update to Citus 8.0.0 - -* Wed Aug 29 2018 - Burak Velioglu 7.5.1.citus-1 -- Update to Citus 7.5.1 - -* Fri Jul 27 2018 - Mehmet Furkan Sahin 7.4.2.citus-1 -- Update to Citus 7.4.2 - -* Wed Jul 25 2018 - Mehmet Furkan Sahin 7.5.0.citus-1 -- Update to Citus 7.5.0 - -* Wed Jun 20 2018 - Burak Velioglu 7.4.1.citus-1 -- Update to Citus 7.4.1 - -* Thu May 17 2018 - Burak Velioglu 7.2.2.citus-1 -- Update to Citus 7.2.2 - -* Tue May 15 2018 - Burak Velioglu 7.4.0.citus-1 -- Update to Citus 7.4.0 - -* Thu Mar 15 2018 - Burak Velioglu 7.3.0.citus-1 -- Update to Citus 7.3.0 - -* Tue Feb 6 2018 - Burak Velioglu 7.2.1.citus-1 -- Update to Citus 7.2.1 - -* Tue Jan 16 2018 - Burak Velioglu 7.2.0.citus-1 -- Update to Citus 7.2.0 - -* Thu Jan 11 2018 - Burak Velioglu 6.2.5.citus-1 -- Update to Citus 6.2.5 - -* Fri Jan 05 2018 - Burak Velioglu 7.1.2.citus-1 -- Update to Citus 7.1.2 - -* Tue Dec 05 2017 - Burak Velioglu 7.1.1.citus-1 -- Update to Citus 7.1.1 - -* Wed Nov 15 2017 - Burak Velioglu 7.1.0.citus-1 -- Update to Citus 7.1.0 - -* Mon Oct 16 2017 - Burak Yucesoy 7.0.3.citus-1 -- Update to Citus 7.0.3 - -* Thu Sep 28 2017 - Burak Yucesoy 7.0.2.citus-1 -- Update to Citus 
7.0.2 - -* Thu Sep 28 2017 - Burak Yucesoy 6.2.4.citus-1 -- Update to Citus 6.2.4 - -* Thu Sep 28 2017 - Burak Yucesoy 6.1.3.citus-1 -- Update to Citus 6.1.3 - -* Tue Sep 12 2017 - Burak Yucesoy 7.0.1.citus-1 -- Update to Citus 7.0.1 - -* Tue Aug 29 2017 - Burak Yucesoy 7.0.0.citus-1 -- Update to Citus 7.0.0 - -* Thu Jul 13 2017 - Burak Yucesoy 6.2.3.citus-1 -- Update to Citus 6.2.3 - -* Wed Jun 7 2017 - Burak Velioglu 6.2.2.citus-1 -- Update to Citus 6.2.2 - -* Wed Jun 7 2017 - Jason Petersen 6.1.2.citus-1 -- Update to Citus 6.1.2 - -* Wed May 24 2017 - Jason Petersen 6.2.1.citus-1 -- Update to Citus 6.2.1 - -* Tue May 16 2017 - Burak Yucesoy 6.2.0.citus-1 -- Update to Citus 6.2.0 - -* Fri May 5 2017 - Metin Doslu 6.1.1.citus-1 -- Update to Citus 6.1.1 - -* Thu Feb 9 2017 - Burak Yucesoy 6.1.0.citus-1 -- Update to Citus 6.1.0 - -* Wed Feb 8 2017 - Jason Petersen 6.0.1.citus-2 -- Transitional package to guide users to new package name - -* Wed Nov 30 2016 - Burak Yucesoy 6.0.1.citus-1 -- Update to Citus 6.0.1 - -* Tue Nov 8 2016 - Jason Petersen 6.0.0.citus-1 -- Update to Citus 6.0.0 - -* Tue Nov 8 2016 - Jason Petersen 5.2.2.citus-1 -- Update to Citus 5.2.2 - -* Tue Sep 6 2016 - Jason Petersen 5.2.1.citus-1 -- Update to Citus 5.2.1 - -* Wed Aug 17 2016 - Jason Petersen 5.2.0.citus-1 -- Update to Citus 5.2.0 - -* Mon Aug 1 2016 - Jason Petersen 5.2.0-0.1.rc.1 -- Release candidate for 5.2 - -* Fri Jun 17 2016 - Jason Petersen 5.1.1-1 -- Update to Citus 5.1.1 - -* Tue May 17 2016 - Jason Petersen 5.1.0-1 -- Update to Citus 5.1.0 - -* Mon May 16 2016 - Jason Petersen 5.1.0-0.2.rc.2 -- Fix EXPLAIN output when FORMAT JSON in use - -* Wed May 4 2016 - Jason Petersen 5.1.0-0.1.rc.1 -- Release candidate for 5.1 - -* Fri Apr 15 2016 - Jason Petersen 5.0.1-1 -- Fixes issues on 32-bit systems - -* Fri Mar 25 2016 - Devrim Gündüz 5.0.0-1 -- Initial RPM packaging for PostgreSQL RPM Repository, - based on the spec file of Jason Petersen @ Citus. +%global pgmajorversion 11 +%global pgpackageversion 11 +%global pginstdir /usr/pgsql-%{pgpackageversion} +%global sname citus +%global debug_package %{nil} + +Summary: PostgreSQL-based distributed RDBMS +Name: %{sname}%{?pkginfix}_%{pgmajorversion} +Provides: %{sname}_%{pgmajorversion} +Conflicts: %{sname}_%{pgmajorversion} +Version: 10.2.4.citus +Release: 1%{dist} +License: AGPLv3 +Group: Applications/Databases +Source0: https://github.com/citusdata/citus/archive/v10.2.4.tar.gz +URL: https://github.com/citusdata/citus +BuildRequires: postgresql%{pgmajorversion}-devel libcurl-devel +Requires: postgresql%{pgmajorversion}-server +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%description +Citus horizontally scales PostgreSQL across commodity servers +using sharding and replication. Its query engine parallelizes +incoming SQL queries across these servers to enable real-time +responses on large datasets. + +Citus extends the underlying database rather than forking it, +which gives developers and enterprises the power and familiarity +of a traditional relational database. As an extension, Citus +supports new PostgreSQL releases, allowing users to benefit from +new features while maintaining compatibility with existing +PostgreSQL tools. Note that Citus supports many (but not all) SQL +commands. 
+ +%prep +%setup -q -n %{sname}-%{version} + +%build + +currentgccver="$(gcc -dumpversion)" +requiredgccver="4.8.2" +if [ "$(printf '%s\n' "$requiredgccver" "$currentgccver" | sort -V | head -n1)" != "$requiredgccver" ]; then + echo ERROR: At least GCC version "$requiredgccver" is needed to build with security flags + exit 1 +fi + +%configure PG_CONFIG=%{pginstdir}/bin/pg_config --with-extra-version="%{?conf_extra_version}" --with-security-flags CC=$(command -v gcc) +make %{?_smp_mflags} + +%install +%make_install +# Install documentation with a better name: +%{__mkdir} -p %{buildroot}%{pginstdir}/doc/extension +%{__cp} README.md %{buildroot}%{pginstdir}/doc/extension/README-%{sname}.md +%{__cp} NOTICE %{buildroot}%{pginstdir}/doc/extension/NOTICE-%{sname} +# Set paths to be packaged other than LICENSE, README & CHANGELOG.md +echo %{pginstdir}/include/server/citus_*.h >> installation_files.list +echo %{pginstdir}/include/server/distributed/*.h >> installation_files.list +echo %{pginstdir}/lib/%{sname}.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_columnar.so ]] && echo %{pginstdir}/lib/citus_columnar.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/pgoutput.so ]] && echo %{pginstdir}/lib/citus_decoders/pgoutput.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_decoders/wal2json.so ]] && echo %{pginstdir}/lib/citus_decoders/wal2json.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_pgoutput.so ]] && echo %{pginstdir}/lib/citus_pgoutput.so >> installation_files.list +[[ -f %{buildroot}%{pginstdir}/lib/citus_wal2json.so ]] && echo %{pginstdir}/lib/citus_wal2json.so >> installation_files.list +echo %{pginstdir}/share/extension/%{sname}-*.sql >> installation_files.list +echo %{pginstdir}/share/extension/%{sname}.control >> installation_files.list +# Since files below may be non-existent in some versions, ignoring the error in case of file absence +[[ -f %{buildroot}%{pginstdir}/share/extension/citus_columnar.control ]] && echo %{pginstdir}/share/extension/citus_columnar.control >> installation_files.list +columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "columnar-*.sql"`) +if [ ${#columnar_sql_files[@]} -gt 0 ]; then + echo %{pginstdir}/share/extension/columnar-*.sql >> installation_files.list +fi + +citus_columnar_sql_files=(`find %{buildroot}%{pginstdir}/share/extension -maxdepth 1 -name "citus_columnar-*.sql"`) +if [ ${#citus_columnar_sql_files[@]} -gt 0 ]; then + echo %{pginstdir}/share/extension/citus_columnar-*.sql >> installation_files.list +fi + +[[ -f %{buildroot}%{pginstdir}/bin/pg_send_cancellation ]] && echo %{pginstdir}/bin/pg_send_cancellation >> installation_files.list +%ifarch ppc64 ppc64le +%else + %if 0%{?rhel} && 0%{?rhel} <= 6 + %else + echo %{pginstdir}/lib/bitcode/%{sname}/*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{sname}*.bc >> installation_files.list + echo %{pginstdir}/lib/bitcode/%{sname}/*/*.bc >> installation_files.list + + # Columnar does not exist in Citus versions < 10.0 + # At this point, we don't have %{pginstdir}, + # so first check build directory for columnar. 
+ [[ -d %{buildroot}%{pginstdir}/lib/bitcode/columnar/ ]] && echo %{pginstdir}/lib/bitcode/columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/ ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_columnar/safeclib ]] && echo %{pginstdir}/lib/bitcode/citus_columnar/safeclib/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_pgoutput ]] && echo %{pginstdir}/lib/bitcode/citus_pgoutput/*.bc >> installation_files.list + [[ -d %{buildroot}%{pginstdir}/lib/bitcode/citus_wal2json ]] && echo %{pginstdir}/lib/bitcode/citus_wal2json/*.bc >> installation_files.list + %endif +%endif + +%clean +%{__rm} -rf %{buildroot} + +%files -f installation_files.list +%files +%defattr(-,root,root,-) +%doc CHANGELOG.md +%if 0%{?rhel} && 0%{?rhel} <= 6 +%doc LICENSE +%else +%license LICENSE +%endif +%doc %{pginstdir}/doc/extension/README-%{sname}.md +%doc %{pginstdir}/doc/extension/NOTICE-%{sname} + +%changelog +* Tue Feb 01 2022 - Gurkan Indibay 10.2.4.citus-1 +- Official 10.2.4 release of Citus + +* Tue Feb 01 2022 - Gurkan Indibay 10.1.4.citus-1 +- Official 10.1.4 release of Citus + +* Mon Nov 29 2021 - Gurkan Indibay 10.2.3.citus-1 +- Official 10.2.3 release of Citus + +* Fri Nov 12 2021 - Gurkan Indibay 10.0.6.citus-1 +- Official 10.0.6 release of Citus + +* Mon Nov 08 2021 - Gurkan Indibay 9.5.10.citus-1 +- Official 9.5.10 release of Citus + +* Thu Nov 04 2021 - Gurkan Indibay 9.2.8.citus-1 +- Official 9.2.8 release of Citus + +* Wed Nov 03 2021 - Gurkan Indibay 9.2.7.citus-1 +- Official 9.2.7 release of Citus + +* Thu Oct 14 2021 - Gurkan Indibay 10.2.2.citus-1 +- Official 10.2.2 release of Citus + +* Fri Sep 24 2021 - Gurkan Indibay 10.2.1.citus-1 +- Official 10.2.1 release of Citus + +* Fri Sep 17 2021 - Gurkan Indibay 10.1.3.citus-1 +- Official 10.1.3 release of Citus + +* Thu Sep 16 2021 - Gurkan Indibay 10.2.0.citus-1 +- Official 10.2.0 release of Citus + +* Tue Aug 17 2021 - Gurkan Indibay 10.1.2.citus-1 +- Official 10.1.2 release of Citus + +* Tue Aug 17 2021 - Gurkan Indibay 10.0.5.citus-1 +- Official 10.0.5 release of Citus + +* Tue Aug 17 2021 - Gurkan Indibay 9.5.7.citus-1 +- Official 9.5.7 release of Citus + +* Wed Aug 11 2021 - Gurkan Indibay 9.4.6.citus-1 +- Official 9.4.6 release of Citus + +* Fri Aug 06 2021 - Gurkan Indibay 10.1.1.citus-1 +- Official 10.1.1 release of Citus + +* Fri Jul 16 2021 - Gurkan Indibay 10.1.0.citus-1 +- Official 10.1.0 release of Citus + +* Fri Jul 16 2021 - Gurkan Indibay 10.0.4.citus-1 +- Official 10.0.4 release of Citus + +* Fri Jul 09 2021 - Gurkan 9.5.6.citus-1 +- Official 9.5.6 release of Citus + +* Thu Jul 08 2021 - Gurkan 9.4.5.citus-1 +- Official 9.4.5 release of Citus + +* Thu Mar 18 2021 - Gurkan Indibay 10.0.3.citus-1 +- Official 10.0.3 release of Citus + +* Thu Mar 4 2021 - Gurkan Indibay 10.0.2.citus-1 +- Official 10.0.2 release of Citus + +* Wed Jan 27 2021 - gurkanindibay 9.5.2.citus-1 +- Official 9.5.2 release of Citus + +* Tue Jan 5 2021 - gurkanindibay 9.4.4.citus-1 +- Official 9.4.4 release of Citus + +* Wed Dec 2 2020 - Onur Tirtir 9.5.1.citus-1 +- Official 9.5.1 release of Citus + +* Tue Nov 24 2020 - Onur Tirtir 9.4.3.citus-1 +- Official 9.4.3 release of Citus + +* Wed Nov 11 2020 - Onur Tirtir 9.5.0.citus-1 +- Official 9.5.0 release of Citus + +* Thu Oct 22 2020 - Onur Tirtir 9.4.2.citus-1 +- Official 9.4.2 release of Citus + +* Wed Sep 30 2020 - Onur Tirtir 9.4.1.citus-1 +- Official 
9.4.1 release of Citus + +* Tue Jul 28 2020 - Onur Tirtir 9.4.0.citus-1 +- Official 9.4.0 release of Citus + +* Mon Jul 27 2020 - Onur Tirtir 9.3.5.citus-1 +- Official 9.3.5 release of Citus + +* Wed Jul 22 2020 - Onur Tirtir 9.3.4.citus-1 +- Official 9.3.4 release of Citus + +* Mon Jul 13 2020 - Onur Tirtir 9.3.3.citus-1 +- Official 9.3.3 release of Citus + +* Thu May 7 2020 - Onur Tirtir 9.3.0.citus-1 +- Update to Citus 9.3.0 + +* Tue Mar 31 2020 - Onur Tirtir 9.2.4.citus-1 +- Update to Citus 9.2.4 + +* Thu Mar 26 2020 - Onur Tirtir 9.2.3.citus-1 +- Update to Citus 9.2.3 + +* Fri Mar 6 2020 - Onur Tirtir 9.0.2.citus-1 +- Update to Citus 9.0.2 + +* Fri Mar 6 2020 - Onur Tirtir 9.2.2.citus-1 +- Update to Citus 9.2.2 + +* Fri Feb 14 2020 - Onur Tirtir 9.2.1.citus-1 +- Update to Citus 9.2.1 + +* Mon Feb 10 2020 - Onur Tirtir 9.2.0.citus-1 +- Update to Citus 9.2.0 + +* Wed Dec 18 2019 - Onur Tirtir 9.1.1.citus-1 +- Update to Citus 9.1.1 + +* Thu Nov 28 2019 - Onur Tirtir 9.1.0.citus-1 +- Update to Citus 9.1.0 + +* Wed Oct 30 2019 - Hanefi Onaldi 9.0.1.citus-1 +- Update to Citus 9.0.1 + +* Thu Oct 10 2019 - Hanefi Onaldi 9.0.0.citus-1 +- Update to Citus 9.0.0 + +* Fri Aug 9 2019 - Hanefi Onaldi 8.3.2.citus-1 +- Update to Citus 8.3.2 + +* Mon Jul 29 2019 - Hanefi Onaldi 8.3.1.citus-1 +- Update to Citus 8.3.1 + +* Wed Jul 10 2019 - Burak Velioglu 8.3.0.citus-1 +- Update to Citus 8.3.0 + +* Wed Jun 12 2019 - Burak Velioglu 8.2.2.citus-1 +- Update to Citus 8.2.2 + +* Wed Apr 3 2019 - Burak Velioglu 8.2.1.citus-1 +- Update to Citus 8.2.1 + +* Wed Apr 3 2019 - Burak Velioglu 8.1.2.citus-1 +- Update to Citus 8.1.2 + +* Thu Mar 28 2019 - Burak Velioglu 8.2.0.citus-1 +- Update to Citus 8.2.0 + +* Wed Jan 9 2019 - Burak Velioglu 8.0.3.citus-1 +- Update to Citus 8.0.3 + +* Mon Jan 7 2019 - Burak Velioglu 8.1.1.citus-1 +- Update to Citus 8.1.1 + +* Tue Dec 18 2018 - Burak Velioglu 8.1.0.citus-1 +- Update to Citus 8.1.0 + +* Thu Dec 13 2018 - Burak Velioglu 8.0.2.citus-1 +- Update to Citus 8.0.2 + +* Wed Dec 12 2018 - Burak Velioglu 7.5.4.citus-1 +- Update to Citus 7.5.4 + +* Wed Nov 28 2018 - Burak Velioglu 8.0.1.citus-1 +- Update to Citus 8.0.1 + +* Wed Nov 28 2018 - Burak Velioglu 7.5.3.citus-1 +- Update to Citus 7.5.3 + +* Wed Nov 14 2018 - Burak Velioglu 7.5.2.citus-1 +- Update to Citus 7.5.2 + +* Fri Nov 02 2018 - Burak Velioglu 8.0.0.citus-1 +- Update to Citus 8.0.0 + +* Wed Aug 29 2018 - Burak Velioglu 7.5.1.citus-1 +- Update to Citus 7.5.1 + +* Fri Jul 27 2018 - Mehmet Furkan Sahin 7.4.2.citus-1 +- Update to Citus 7.4.2 + +* Wed Jul 25 2018 - Mehmet Furkan Sahin 7.5.0.citus-1 +- Update to Citus 7.5.0 + +* Wed Jun 20 2018 - Burak Velioglu 7.4.1.citus-1 +- Update to Citus 7.4.1 + +* Thu May 17 2018 - Burak Velioglu 7.2.2.citus-1 +- Update to Citus 7.2.2 + +* Tue May 15 2018 - Burak Velioglu 7.4.0.citus-1 +- Update to Citus 7.4.0 + +* Thu Mar 15 2018 - Burak Velioglu 7.3.0.citus-1 +- Update to Citus 7.3.0 + +* Tue Feb 6 2018 - Burak Velioglu 7.2.1.citus-1 +- Update to Citus 7.2.1 + +* Tue Jan 16 2018 - Burak Velioglu 7.2.0.citus-1 +- Update to Citus 7.2.0 + +* Thu Jan 11 2018 - Burak Velioglu 6.2.5.citus-1 +- Update to Citus 6.2.5 + +* Fri Jan 05 2018 - Burak Velioglu 7.1.2.citus-1 +- Update to Citus 7.1.2 + +* Tue Dec 05 2017 - Burak Velioglu 7.1.1.citus-1 +- Update to Citus 7.1.1 + +* Wed Nov 15 2017 - Burak Velioglu 7.1.0.citus-1 +- Update to Citus 7.1.0 + +* Mon Oct 16 2017 - Burak Yucesoy 7.0.3.citus-1 +- Update to Citus 7.0.3 + +* Thu Sep 28 2017 - Burak Yucesoy 7.0.2.citus-1 +- Update to Citus 
7.0.2 + +* Thu Sep 28 2017 - Burak Yucesoy 6.2.4.citus-1 +- Update to Citus 6.2.4 + +* Thu Sep 28 2017 - Burak Yucesoy 6.1.3.citus-1 +- Update to Citus 6.1.3 + +* Tue Sep 12 2017 - Burak Yucesoy 7.0.1.citus-1 +- Update to Citus 7.0.1 + +* Tue Aug 29 2017 - Burak Yucesoy 7.0.0.citus-1 +- Update to Citus 7.0.0 + +* Thu Jul 13 2017 - Burak Yucesoy 6.2.3.citus-1 +- Update to Citus 6.2.3 + +* Wed Jun 7 2017 - Burak Velioglu 6.2.2.citus-1 +- Update to Citus 6.2.2 + +* Wed Jun 7 2017 - Jason Petersen 6.1.2.citus-1 +- Update to Citus 6.1.2 + +* Wed May 24 2017 - Jason Petersen 6.2.1.citus-1 +- Update to Citus 6.2.1 + +* Tue May 16 2017 - Burak Yucesoy 6.2.0.citus-1 +- Update to Citus 6.2.0 + +* Fri May 5 2017 - Metin Doslu 6.1.1.citus-1 +- Update to Citus 6.1.1 + +* Thu Feb 9 2017 - Burak Yucesoy 6.1.0.citus-1 +- Update to Citus 6.1.0 + +* Wed Feb 8 2017 - Jason Petersen 6.0.1.citus-2 +- Transitional package to guide users to new package name + +* Wed Nov 30 2016 - Burak Yucesoy 6.0.1.citus-1 +- Update to Citus 6.0.1 + +* Tue Nov 8 2016 - Jason Petersen 6.0.0.citus-1 +- Update to Citus 6.0.0 + +* Tue Nov 8 2016 - Jason Petersen 5.2.2.citus-1 +- Update to Citus 5.2.2 + +* Tue Sep 6 2016 - Jason Petersen 5.2.1.citus-1 +- Update to Citus 5.2.1 + +* Wed Aug 17 2016 - Jason Petersen 5.2.0.citus-1 +- Update to Citus 5.2.0 + +* Mon Aug 1 2016 - Jason Petersen 5.2.0-0.1.rc.1 +- Release candidate for 5.2 + +* Fri Jun 17 2016 - Jason Petersen 5.1.1-1 +- Update to Citus 5.1.1 + +* Tue May 17 2016 - Jason Petersen 5.1.0-1 +- Update to Citus 5.1.0 + +* Mon May 16 2016 - Jason Petersen 5.1.0-0.2.rc.2 +- Fix EXPLAIN output when FORMAT JSON in use + +* Wed May 4 2016 - Jason Petersen 5.1.0-0.1.rc.1 +- Release candidate for 5.1 + +* Fri Apr 15 2016 - Jason Petersen 5.0.1-1 +- Fixes issues on 32-bit systems + +* Fri Mar 25 2016 - Devrim Gündüz 5.0.0-1 +- Initial RPM packaging for PostgreSQL RPM Repository, + based on the spec file of Jason Petersen @ Citus. 
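The two spec fixtures above gate %build on a minimum GCC version by piping the
required and detected version strings through `sort -V` and checking that the
required version sorts first. A minimal standalone sketch of that idiom follows;
the `have_min_version` helper and the sample values are illustrative only and
are not part of the fixtures:

    #!/bin/bash
    # Succeeds when $2 (detected version) is >= $1 (required version).
    # Same comparison as the spec fixtures' %build check: version-sort both
    # strings and verify the required version is the smaller (or equal) one.
    have_min_version() {
        required="$1"
        current="$2"
        [ "$(printf '%s\n' "$required" "$current" | sort -V | head -n1)" = "$required" ]
    }

    # Example usage (values shown are illustrative):
    if have_min_version "4.8.2" "$(gcc -dumpversion)"; then
        echo "GCC is new enough to build with security flags"
    else
        echo "ERROR: GCC 4.8.2 or newer is required" >&2
        exit 1
    fi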
diff --git a/packaging_automation/tests/files/debian.changelog.refer b/packaging_automation/tests/files/debian.changelog.refer index ffdce6ed..43cc3b2f 100644 --- a/packaging_automation/tests/files/debian.changelog.refer +++ b/packaging_automation/tests/files/debian.changelog.refer @@ -1,2155 +1,2155 @@ -citus (10.1.4.citus-1) stable; urgency=low - - * Official 10.1.4 release of Citus - - -- Gurkan Indibay Tue, 01 Feb 2022 11:49:11 +0000 - -citus (10.2.3.citus-1) stable; urgency=low - - * Official 10.2.3 release of Citus - - -- Gurkan Indibay Mon, 29 Nov 2021 11:00:41 +0000 - -citus (10.0.6.citus-1) stable; urgency=low - - * Official 10.0.6 release of Citus - - -- Gurkan Indibay Fri, 12 Nov 2021 11:37:25 +0000 - -citus (9.5.10.citus-1) stable; urgency=low - - * Official 9.5.10 release of Citus - - -- Gurkan Indibay Mon, 08 Nov 2021 14:18:56 +0000 - -citus (9.2.8.citus-1) stable; urgency=low - - * Official 9.2.8 release of Citus - - -- Gurkan Indibay Thu, 04 Nov 2021 12:45:09 +0000 - -citus (9.2.7.citus-1) stable; urgency=low - - * Official 9.2.7 release of Citus - - -- Gurkan Indibay Wed, 03 Nov 2021 09:34:30 +0000 - -citus (10.2.2.citus-1) stable; urgency=low - - * Official 10.2.2 release of Citus - - -- Gurkan Indibay Thu, 14 Oct 2021 13:00:27 +0000 - -citus (10.2.1.citus-1) stable; urgency=low - - * Adds missing version-mismatch checks for columnar tables - - * Adds missing version-mismatch checks for internal functions - - * Fixes a bug that could cause partition shards being not co-located with - parent shards - - * Fixes a bug that prevents pushing down boolean expressions when using - columnar custom scan - - * Fixes a clog lookup failure that could occur when writing to a columnar - table - - * Fixes an issue that could cause unexpected errors when there is an - in-progress write to a columnar table - - * Revokes read access to `columnar.chunk` from unprivileged user - - -- Gurkan Indibay Fri, 24 Sep 2021 12:03:35 +0000 - -citus (10.1.3.citus-1) stable; urgency=low - - * Official 10.1.3 release of Citus - - -- Gurkan Indibay Fri, 17 Sep 2021 17:23:05 +0000 - -citus (10.2.0.citus-1) stable; urgency=low - - * Adds PostgreSQL 14 support - - * Adds hash & btree index support for columnar tables - - * Adds helper UDFs for easy time partition management: - `get_missing_time_partition_ranges`, `create_time_partitions`, and - `drop_old_time_partitions` - - * Adds propagation of ALTER SEQUENCE - - * Adds support for ALTER INDEX ATTACH PARTITION - - * Adds support for CREATE INDEX ON ONLY - - * Allows more graceful failovers when replication factor > 1 - - * Enables chunk group filtering to work with Params for columnar tables - - * Enables qual push down for joins including columnar tables - - * Enables transferring of data using binary encoding by default on PG14 - - * Improves `master_update_table_statistics` and provides distributed deadlock - detection - - * Includes `data_type` and `cache` in sequence definition on worker - - * Makes start/stop_metadata_sync_to_node() transactional - - * Makes sure that table exists before updating table statistics - - * Prevents errors with concurrent `citus_update_table_statistics` and DROP table - - * Reduces memory usage of columnar table scans by freeing the memory used for - last stripe read - - * Shows projected columns for columnar tables in EXPLAIN output - - * Speeds up dropping partitioned tables - - * Synchronizes hasmetadata flag on mx workers - - * Uses current user while syncing metadata - - * Adds a parameter to cleanup metadata when 
metadata syncing is stopped - - * Fixes a bug about int and smallint sequences on MX - - * Fixes a bug that cause partitions to have wrong distribution key after - DROP COLUMN - - * Fixes a bug that caused `worker_append_table_to_shard` to write as superuser - - * Fixes a bug that caused `worker_create_or_alter_role` to crash with NULL input - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that may cause crash while aborting transaction - - * Fixes a bug that prevents attaching partitions when colocated foreign key - exists - - * Fixes a bug with `nextval('seq_name'::text)` - - * Fixes a crash in shard rebalancer when no distributed tables exist - - * Fixes a segfault caused by use after free in when using a cached connection - - * Fixes a UNION pushdown issue - - * Fixes a use after free issue that could happen when altering a distributed - table - - * Fixes showing target shard size in the rebalance progress monitor - - -- Gurkan Indibay Thu, 16 Sep 2021 08:45:17 +0000 - -citus (10.1.2.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - -- Gurkan Indibay Tue, 17 Aug 2021 16:25:14 +0000 - -citus (10.0.5.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Tue, 17 Aug 2021 14:42:54 +0000 - -citus (9.5.7.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - * Improves master_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Tue, 17 Aug 2021 13:18:11 +0000 - -citus (9.4.6.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Improves master_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Wed, 11 Aug 2021 08:57:04 +0000 - -citus (10.1.1.citus-1) stable; urgency=low - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - * Fixes showing target shard size in the rebalance progress monitor - - -- Gurkan Indibay Fri, 06 Aug 2021 08:38:37 +0000 - -citus (10.1.0.citus-1) stable; urgency=low - - * Drops support for PostgreSQL 11 - - * Adds `shard_count` parameter to `create_distributed_table` function - - * Adds support for `ALTER DATABASE OWNER` - - * Adds support for temporary columnar tables - - * Adds support for using sequences as column default values when syncing - metadata - - * `alter_columnar_table_set` enforces columnar table option constraints - - * Continues to remove shards after failure in `DropMarkedShards` - - * Deprecates the `citus.replication_model` GUC - - * Enables `citus.defer_drop_after_shard_move` by default - - * Ensures free disk space before moving a shard - - * Fetches shard size on the fly for the rebalance monitor - - * Ignores old placements when disabling or removing a node - - * Implements `improvement_threshold` at shard rebalancer moves - - * Improves orphaned shard cleanup logic - - * Improves performance of `citus_shards` - - * Introduces `citus.local_hostname` GUC for connections to the current node - - * Makes sure connection 
is closed after each shard move - - * Makes sure that target node in shard moves is eligible for shard move - - * Optimizes partitioned disk size calculation for shard rebalancer - - * Prevents connection errors by properly terminating connections - - * Prevents inheriting a distributed table - - * Prevents users from dropping & truncating known shards - - * Pushes down `VALUES` clause as long as not in outer part of a `JOIN` - - * Reduces memory usage for multi-row inserts - - * Reduces memory usage while rebalancing shards - - * Removes length limits around partition names - - * Removes dependencies on the existence of public schema - - * Executor avoids opening extra connections - - * Excludes orphaned shards while finding shard placements - - * Preserves access method of materialized views when undistributing - or altering distributed tables - - * Fixes a bug that allowed moving of shards belonging to a reference table - - * Fixes a bug that can cause a crash when DEBUG4 logging is enabled - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that causes worker_create_or_alter_role to crash with NULL input - - * Fixes a bug where foreign key to reference table was disallowed - - * Fixes a bug with local cached plans on tables with dropped columns - - * Fixes data race in `get_rebalance_progress` - - * Fixes `FROM ONLY` queries on partitioned tables - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - * Fixes error message for local table joins - - * Fixes issues caused by omitting public schema in queries - - * Fixes nested `SELECT` query with `UNION` bug - - * Fixes null relationName bug at parallel execution - - * Fixes possible segfaults when using Citus in the middle of an upgrade - - * Fixes problems with concurrent calls of `DropMarkedShards` - - * Fixes shared dependencies that are not resident in a database - - * Fixes stale hostnames bug in prepared statements after `master_update_node` - - * Fixes the relation size bug during rebalancing - - * Fixes two race conditions in the get_rebalance_progress - - * Fixes using 2PC when it might be necessary - - -- Gurkan Indibay Fri, 16 Jul 2021 15:37:21 +0000 - -citus (10.0.4.citus-1) stable; urgency=low - - * Introduces `citus.local_hostname` GUC for connections to the current node - - * Removes dependencies on the existence of public schema - - * Removes limits around long partition names - - * Fixes a bug that can cause a crash when DEBUG4 logging is enabled - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - * Fixes FROM ONLY queries on partitioned tables - - * Fixes issues caused by public schema being omitted in queries - - * Fixes problems with concurrent calls of DropMarkedShards - - * Fixes relname null bug when using parallel execution - - * Fixes two race conditions in the get_rebalance_progress - - -- Gurkan Indibay Fri, 16 Jul 2021 10:58:55 +0000 - -citus (9.5.6.citus-1) stable; urgency=low - - * Fixes minor bug in `citus_prepare_pg_upgrade` that caused it to lose its - idempotency - - -- Gurkan Fri, 09 Jul 2021 13:30:42 +0000 - -citus (9.4.5.citus-1) stable; urgency=low - - * Adds a configure flag to enforce security - - * Avoids re-using connections for intermediate results - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that might cause self-deadlocks when COPY used in TX block - - * Fixes an issue 
that could cause citus_finish_pg_upgrade to fail - - -- Gurkan Thu, 08 Jul 2021 10:15:23 +0000 - -citus (10.0.3.citus-1) stable; urgency=low - - * Prevents infinite recursion for queries that involve UNION ALL - below `JOIN` - - * Fixes a crash in queries with a modifying CTE and a SELECT - without `FROM` - - * Fixes upgrade and downgrade paths for citus_update_table_statistics - - * Fixes a bug that causes SELECT queries to use 2PC unnecessarily - - * Fixes a bug that might cause self-deadlocks with - `CREATE INDEX` / `REINDEX CONCURRENTLY` commands - - * Adds citus.max_cached_connection_lifetime GUC to set maximum connection - lifetime - - * Adds citus.remote_copy_flush_threshold GUC that controls - per-shard memory usages by `COPY` - - * Adds citus_get_active_worker_nodes UDF to deprecate - `master_get_active_worker_nodes` - - * Skips 2PC for readonly connections in a transaction - - * Makes sure that local execution starts coordinated transaction - - * Removes open temporary file warning when cancelling a query with - an open tuple store - - * Relaxes the locks when adding an existing node - - -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 - -citus (10.0.2.citus-1) stable; urgency=low - - * Adds a configure flag to enforce security - - * Fixes a bug due to cross join without target list - - * Fixes a bug with UNION ALL on PG 13 - - * Fixes a compatibility issue with pg_audit in utility calls - - * Fixes insert query with CTEs/sublinks/subqueries etc - - * Grants SELECT permission on citus_tables view to public - - * Grants SELECT permission on columnar metadata tables to public - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - * Preserves colocation with procedures in alter_distributed_table - - * Prevents using alter_columnar_table_set and alter_columnar_table_reset - on a columnar table not owned by the user - - * Removes limits around long table names - - -- Gurkan Indibay Thu, 4 Mar 2021 2:46:54 +0000 - -citus (9.5.2.citus-1) stable; urgency=low - - * Fixes distributed deadlock detection being blocked by metadata sync - - * Prevents segfaults when SAVEPOINT handling cannot recover from connection - failures - - * Fixes possible issues that might occur with single shard distributed tables - - -- gurkanindibay Wed, 27 Jan 2021 11:25:38 +0000 - -citus (9.4.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause router queries with local tables to be pushed - down - - * Fixes a segfault in connection management due to invalid connection hash - entries - - * Fixes possible issues that might occur with single shard distributed tables - - -- gurkanindibay Tue, 5 Jan 2021 14:58:56 +0000 - -citus (9.5.1.citus-1) stable; urgency=low - - * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE - - * Fixes a bug that could cause excessive memory consumption when a partition is - created - - * Fixes a bug that triggers subplan executions unnecessarily with cursors - - * Fixes a segfault in connection management due to invalid connection hash - entries - - -- Onur Tirtir Wed, 2 Dec 2020 14:28:44 +0000 - -citus (9.4.3.citus-1) stable; urgency=low - - * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE - - * Fixes a bug that triggers subplan executions unnecessarily with cursors - - -- Onur Tirtir Tue, 24 Nov 2020 11:17:57 +0000 - -citus (9.5.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 13 - - * Removes the task-tracker executor - - * Introduces citus local tables - - * Introduces undistribute_table UDF to 
convert tables back to postgres tables - - * Adds support for EXPLAIN (ANALYZE) EXECUTE and EXPLAIN EXECUTE - - * Adds support for EXPLAIN (ANALYZE, WAL) for PG13 - - * Sorts the output of EXPLAIN (ANALYZE) by execution duration. - - * Adds support for CREATE TABLE ... USING table_access_method - - * Adds support for WITH TIES option in SELECT and INSERT SELECT queries - - * Avoids taking multi-shard locks on workers - - * Enforces citus.max_shared_pool_size config in COPY queries - - * Enables custom aggregates with multiple parameters to be executed on workers - - * Enforces citus.max_intermediate_result_size in local execution - - * Improves cost estimation of INSERT SELECT plans - - * Introduces delegation of procedures that read from reference tables - - * Prevents pull-push execution for simple pushdownable subqueries - - * Improves error message when creating a foreign key to a local table - - * Makes citus_prepare_pg_upgrade idempotent by dropping transition tables - - * Disallows ON TRUE outer joins with reference & distributed tables when - reference table is outer relation to avoid incorrect results - - * Disallows field indirection in INSERT/UPDATE queries to avoid incorrect - results - - * Disallows volatile functions in UPDATE subqueries to avoid incorrect results - - * Fixes CREATE INDEX CONCURRENTLY crash with local execution - - * Fixes citus_finish_pg_upgrade to drop all backup tables - - * Fixes a bug that cause failures when RECURSIVE VIEW joined reference table - - * Fixes DROP SEQUENCE failures when metadata syncing is enabled - - * Fixes a bug that caused CREATE TABLE with CHECK constraint to fail - - * Fixes a bug that could cause VACUUM to deadlock - - * Fixes master_update_node failure when no background worker slots are available - - * Fixes a bug that caused replica identity to not be propagated on shard repair - - * Fixes a bug that could cause crashes after connection timeouts - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug that could cause deadlocks on CREATE INDEX - - * Fixes a bug with genetic query optimization in outer joins - - * Fixes a crash when aggregating empty tables - - * Fixes a crash with inserting domain constrained composite types - - * Fixes a crash with multi-row & router INSERT's in local execution - - * Fixes a possibility of doing temporary file cleanup more than once - - * Fixes incorrect setting of join related fields - - * Fixes memory issues around deparsing index commands - - * Fixes reference table access tracking for sequential execution - - * Fixes removal of a single node with only reference tables - - * Fixes sending commands to coordinator when it is added as a worker - - * Fixes write queries with const expressions and COLLATE in various places - - * Fixes wrong cancellation message about distributed deadlock - - -- Onur Tirtir Wed, 11 Nov 2020 15:00:27 +0000 - -citus (9.4.2.citus-1) stable; urgency=low - - * Fixes a bug that could lead to multiple maintenance daemons - - * Fixes an issue preventing views in reference table modifications - - -- Onur Tirtir Thu, 22 Oct 2020 8:53:44 +0000 - -citus (9.4.1.citus-1) stable; urgency=low - - * Fixes EXPLAIN ANALYZE output truncation - - * Fixes a deadlock during transaction recovery - - -- Onur Tirtir Wed, 30 Sep 2020 9:33:46 +0000 - -citus (9.4.0.citus-1) stable; urgency=low - - * Improves COPY by honoring max_adaptive_executor_pool_size config - - * Adds support for insert into local table select from distributed table - - * Adds support to 
partially push down tdigest aggregates - - * Adds support for receiving binary encoded results from workers using - citus.enable_binary_protocol - - * Enables joins between local tables and CTEs - - * Adds showing query text in EXPLAIN output when explain verbose is true - - * Adds support for showing CTE statistics in EXPLAIN ANALYZE - - * Adds support for showing amount of data received in EXPLAIN ANALYZE - - * Introduces downgrade paths in migration scripts - - * Avoids returning incorrect results when changing roles in a transaction - - * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug - - * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug with lists of configuration values in ALTER ROLE SET statements - - * Fixes a bug that occurs when coordinator is added as a worker node - - * Fixes a crash because of overflow in partition id with certain compile flags - - * Fixes a crash that may happen if no worker nodes are added - - * Fixes a crash that occurs when inserting implicitly coerced constants - - * Fixes a crash when aggregating empty tables - - * Fixes a memory leak in subtransaction memory handling - - * Fixes crash when using rollback to savepoint after cancellation of DML - - * Fixes deparsing for queries with anonymous column references - - * Fixes distribution of composite types failing to include typemods - - * Fixes explain analyze on adaptive executor repartitions - - * Fixes possible error throwing in abort handle - - * Fixes segfault when evaluating func calls with default params on coordinator - - * Fixes several EXPLAIN ANALYZE issues - - * Fixes write queries with const expressions and COLLATE in various places - - * Fixes wrong cancellation message about distributed deadlocks - - * Reports correct INSERT/SELECT method in EXPLAIN - - * Disallows triggers on citus tables - - -- Onur Tirtir Tue, 28 Jul 2020 13:22:31 +0000 - -citus (9.3.5.citus-1) stable; urgency=low - - * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug - - * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug - - * Fixes a crash because of overflow in partition id with certain compile flags - - -- Onur Tirtir Mon, 27 Jul 2020 7:28:18 +0000 - -citus (9.3.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug with lists of configuration values in ALTER ROLE SET statements - - * Fixes deparsing for queries with anonymous column references - - -- Onur Tirtir Wed, 22 Jul 2020 9:00:01 +0000 - -citus (9.3.3.citus-1) stable; urgency=low - - * Fixes a memory leak in subtransaction memory handling - - -- Onur Tirtir Mon, 13 Jul 2020 8:47:40 +0000 - -citus (9.3.0.citus-1) stable; urgency=low - - * Adds max_shared_pool_size to control number of connections across sessions - - * Adds support for window functions on coordinator - - * Improves shard pruning logic to understand OR-conditions - - * Prevents using an extra connection for intermediate result multi-casts - - * Adds propagation of ALTER ROLE .. 
SET statements - - * Adds update_distributed_table_colocation UDF to update colocation of tables - - * Introduces a UDF to truncate local data after distributing a table - - * Adds support for creating temp schemas in parallel - - * Adds support for evaluation of nextval in the target list on coordinator - - * Adds support for local execution of COPY/TRUNCATE/DROP/DDL commands - - * Adds support for local execution of shard creation - - * Uses local execution in a transaction block - - * Adds support for querying distributed table sizes concurrently - - * Allows master_copy_shard_placement to replicate placements to new nodes - - * Allows table type to be used in target list - - * Avoids having multiple maintenance daemons active for a single database - - * Defers reference table replication to shard creation time - - * Enables joins between local tables and reference tables in transaction blocks - - * Ignores pruned target list entries in coordinator plan - - * Improves SIGTERM handling of maintenance daemon - - * Increases the default of citus.node_connection_timeout to 30 seconds - - * Fixes a bug that occurs when creating remote tasks in local execution - - * Fixes a bug that causes some DML queries containing aggregates to fail - - * Fixes a bug that could cause failures in queries with subqueries or CTEs - - * Fixes a bug that may cause some connection failures to throw errors - - * Fixes a bug which caused queries with SRFs and function evalution to fail - - * Fixes a bug with generated columns when executing COPY dist_table TO file - - * Fixes a crash when using non-constant limit clauses - - * Fixes a failure when composite types used in prepared statements - - * Fixes a possible segfault when dropping dist. table in a transaction block - - * Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING - - * Fixes a segfault when executing queries using GROUPING - - * Fixes an error when using LEFT JOIN with GROUP BY on primary key - - * Fixes an issue with distributing tables having generated cols not at the end - - * Fixes automatic SSL permission issue when using "initdb --allow-group-access" - - * Fixes errors which could occur when subqueries are parameters to aggregates - - * Fixes possible issues by invalidating the plan cache in master_update_node - - * Fixes timing issues which could be caused by changing system clock - - -- Onur Tirtir Thu, 7 May 2020 15:11:25 +0000 - -citus (9.2.4.citus-1) stable; urgency=low - - * Fixes a release problem in 9.2.3 - - -- Onur Tirtir Tue, 31 Mar 2020 08:06:59 +0000 - -citus (9.2.3.citus-1) stable; urgency=low - - * Do not use C functions that have been banned by Microsoft - - * Fixes a bug that causes wrong results with complex outer joins - - * Fixes issues found using static analysis - - * Fixes left join shard pruning in pushdown planner - - * Fixes possibility for segmentation fault in internal aggregate functions - - * Fixes possible segfault when non pushdownable aggregates are used in HAVING - - * Improves correctness of planning subqueries in HAVING - - * Prevents using old connections for security if citus.node_conninfo changed - - * Uses Microsoft approved cipher string for default TLS setup - - -- Onur Tirtir Thu, 26 Mar 2020 8:22:48 +0000 - -citus (9.0.2.citus-1) stable; urgency=low - - * Fixes build errors on EL/OL 6 based distros - - * Fixes a bug that caused maintenance daemon to fail on standby nodes - - * Disallows distributed function creation when replication_model is `statement` - - -- Onur Tirtir 
Fri, 6 Mar 2020 14:10:16 +0000 - -citus (9.2.2.citus-1) stable; urgency=low - - * Fixes a bug that caused some prepared stmts with function calls to fail - - * Fixes a bug that caused some prepared stmts with composite types to fail - - * Fixes a bug that caused missing subplan results in workers - - * Improves performance of re-partition joins - - -- Onur Tirtir Fri, 6 Mar 2020 07:14:20 +0000 - -citus (9.2.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause crashes if distribution key is NULL - - -- Onur Tirtir Fri, 14 Feb 2020 11:51:09 +0000 - -citus (9.2.0.citus-1) stable; urgency=low - - * Adds support for INSERT...SELECT queries with re-partitioning - - * Adds citus.coordinator_aggregation_strategy to support more aggregates - - * Adds caching of local plans on shards for Citus MX - - * Adds compatibility support for dist. object infrastructure from old versions - - * Adds defering shard-pruning for fast-path router queries to execution - - * Adds propagation of GRANT ... ON SCHEMA queries - - * Adds support for CTE pushdown via CTE inlining in distributed planning - - * Adds support for ALTER TABLE ... SET SCHEMA propagation. - - * Adds support for DROP ROUTINE & ALTER ROUTINE commands - - * Adds support for any inner join on a reference table - - * Changes citus.log_remote_commands level to NOTICE - - * Disallows marking ref. table shards unhealthy in the presence of savepoints - - * Disallows placing new shards with shards in TO_DELETE state - - * Enables local execution of queries that do not need any data access - - * Fixes Makefile trying to cleanup PG directory during install - - * Fixes a bug causing errors when planning a query with multiple subqueries - - * Fixes a possible deadlock that could happen during shard moves - - * Fixes a problem when adding a new node due to tables referenced in func body - - * Fixes an issue that could cause joins with reference tables to be slow - - * Fixes cached metadata for shard is inconsistent issue - - * Fixes inserting multiple composite types as partition key in VALUES - - * Fixes unnecessary repartition on joins with more than 4 tables - - * Prevents wrong results for replicated partitioned tables after failure - - * Restricts LIMIT approximation for non-commutative aggregates - - -- Onur Tirtir Wed, 10 Feb 2020 8:48:00 +0000 - -citus (9.1.1.citus-1) stable; urgency=low - - * Fixes a bug causing SQL-executing UDFs to crash when passing in DDL - - * Fixes a bug that caused column_to_column_name to crash for invalid input - - * Fixes a bug that caused inserts into local tables w/ dist. 
subqueries to crash - - * Fixes a bug that caused some noop DML statements to fail - - * Fixes a bug that prevents dropping reference table columns - - * Fixes a crash in IN (.., NULL) queries - - * Fixes a crash when calling a distributed function from PL/pgSQL - - * Fixes an issue that caused CTEs to sometimes leak connections - - * Fixes strange errors in DML with unreachable sublinks - - * Prevents statements in SQL functions to run outside of a transaction - - -- Onur Tirtir Wed, 18 Dec 2019 14:32:42 +0000 - -citus (9.1.0.citus-1) stable; urgency=low - - * Adds extensions to distributed object propagation infrastructure - - * Adds support for ALTER ROLE propagation - - * Adds support for aggregates in create_distributed_function - - * Adds support for expressions in reference joins - - * Adds support for returning RECORD in multi-shard queries - - * Adds support for simple IN subqueries on unique cols in repartition joins - - * Adds support for subqueries in HAVING clauses - - * Automatically distributes unary aggs w/ combinefunc and non-internal stype - - * Disallows distributed func creation when replication_model is 'statement' - - * Drops support for deprecated real-time and router executors - - * Fixes a bug in local execution that could cause missing rows in RETURNING - - * Fixes a bug that caused maintenance daemon to fail on standby nodes - - * Fixes a bug that caused other CREATE EXTENSION commands to take longer - - * Fixes a bug that prevented REFRESH MATERIALIZED VIEW - - * Fixes a bug when view is used in modify statements - - * Fixes a memory leak in adaptive executor when query returns many columns - - * Fixes underflow init of default values in worker extended op node creation - - * Fixes potential segfault in standard_planner inlining functions - - * Fixes an issue that caused failures in RHEL 6 builds - - * Fixes queries with repartition joins and group by unique column - - * Improves CTE/Subquery performance by pruning intermediate rslt broadcasting - - * Removes citus.worker_list_file GUC - - * Revokes usage from the citus schema from public - - -- Onur Tirtir Thu, 28 Nov 2019 15:11:05 +0000 - -citus (9.0.1.citus-1) stable; urgency=low - - * Fixes a memory leak in the executor - - * Revokes usage from the citus schema from public - - -- Hanefi Onaldi Wed, 30 Oct 2019 8:53:22 +0000 - -citus (9.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 12 - - * Adds UDFs to help with PostgreSQL upgrades - - * Distributes types to worker nodes - - * Introduces create_distributed_function UDF - - * Introduces local query execution for Citus MX - - * Implements infrastructure for routing CALL to MX workers - - * Implements infrastructure for routing SELECT function() to MX workers - - * Adds support for foreign key constraints between reference tables - - * Adds a feature flag to turn off CREATE TYPE propagation - - * Adds option citus.single_shard_commit_protocol - - * Adds support for EXPLAIN SUMMARY - - * Adds support for GENERATE ALWAYS AS STORED - - * Adds support for serial and smallserial in MX mode - - * Adds support for anon composite types on the target list in router queries - - * Avoids race condition between create_reference_table & master_add_node - - * Fixes a bug in schemas of distributed sequence definitions - - * Fixes a bug that caused run_command_on_colocated_placements to fail - - * Fixes a bug that leads to various issues when a connection is lost - - * Fixes a schema leak on CREATE INDEX statement - - * Fixes assert failure in bare SELECT 
FROM reference table FOR UPDATE in MX - - * Makes master_update_node MX compatible - - * Prevents pg_dist_colocation from multiple records for reference tables - - * Prevents segfault in worker_partition_protocol edgecase - - * Propagates ALTER FUNCTION statements for distributed functions - - * Propagates CREATE OR REPLACE FUNCTION for distributed functions - - * Propagates REINDEX on tables & indexes - - * Provides a GUC to turn of the new dependency propagation functionality - - * Uses 2PC in adaptive executor when dealing with replication factors above 1 - - -- Hanefi Onaldi Tue, 15 Oct 2019 16:54:50 +0000 - -citus (8.3.2.citus-1) stable; urgency=low - - * Fixes performance issues by skipping unnecessary relation access recordings - - -- Hanefi Onaldi Fri, 9 Aug 2019 11:15:57 +0000 - -citus (8.3.1.citus-1) stable; urgency=low - - * Improves Adaptive Executor performance - - -- Hanefi Onaldi Mon, 29 Jul 2019 10:25:50 +0000 - -citus (8.3.0.citus-1) stable; urgency=low - - * Adds a new distributed executor: Adaptive Executor - - * citus.enable_statistics_collection defaults to off (opt-in) - - * Adds support for CTEs in router planner for modification queries - - * Adds support for propagating SET LOCAL at xact start - - * Adds option to force master_update_node during failover - - * Deprecates master_modify_multiple_shards - - * Improves round robin logic on router queries - - * Creates all distributed schemas as superuser on a separate connection - - * Makes COPY adapt to connection use behaviour of previous commands - - * Replaces SESSION_LIFESPAN with configurable no. of connections at xact end - - * Propagates ALTER FOREIGN TABLE commands to workers - - * Don't schedule tasks on inactive nodes - - * Makes DROP/VALIDATE CONSTRAINT tolerant of ambiguous shard extension - - * Fixes an issue with subquery map merge jobs as non-root - - * Fixes null pointers caused by partial initialization of ConnParamsHashEntry - - * Fixes errors caused by joins with shadowed aliases - - * Fixes a regression in outer joining subqueries introduced in 8.2.0 - - * Fixes a crash that can occur under high memory load - - * Fixes a bug that selects wrong worker when using round-robin assignment - - * Fixes savepoint rollback after multi-shard modify/copy failure - - * Fixes bad foreign constraint name search - - * Fixes a bug that prevents stack size to be adjusted - - -- Hanefi Onaldi Wed, 10 Jul 2019 15:19:02 +0000 - -citus (8.2.2.citus-1) stable; urgency=low - - * Fixes a bug in outer joins wrapped in subqueries - - -- Burak Velioglu Wed, 12 Jun 2019 8:45:08 +0000 - -citus (8.2.1.citus-1) stable; urgency=low - - * Fixes a bug that prevents stack size to be adjusted - - -- Burak Velioglu Wed, 3 Apr 2019 20:56:47 +0000 - -citus (8.1.2.citus-1) stable; urgency=low - - * Don't do redundant ALTER TABLE consistency checks at coordinator - - * Fixes a bug that prevents stack size to be adjusted - - * Fix an issue with some DECLARE .. 
CURSOR WITH HOLD commands - - -- Burak Velioglu Wed, 3 Apr 2019 20:34:46 +0000 - -citus (8.2.0.citus-1) stable; urgency=low - - * Removes support and code for PostgreSQL 9.6 - - * Enable more outer joins with reference tables - - * Execute CREATE INDEX CONCURRENTLY in parallel - - * Treat functions as transaction blocks - - * Add support for column aliases on join clauses - - * Skip standard_planner() for trivial queries - - * Added support for function calls in joins - - * Round-robin task assignment policy relies on local transaction id - - * Relax subquery union pushdown restrictions for reference tables - - * Speed-up run_command_on_shards() - - * Address some memory issues in connection config - - * Restrict visibility of get_*_active_transactions functions to pg_monitor - - * Don't do redundant ALTER TABLE consistency checks at coordinator - - * Queries with only intermediate results do not rely on task assignment policy - - * Finish connection establishment in parallel for multiple connections - - * Fixes a bug related to pruning shards using a coerced value - - * Fix an issue with some DECLARE .. CURSOR WITH HOLD commands - - * Fixes a bug that could lead to infinite recursion during recursive planning - - * Fixes a bug that could prevent planning full outer joins with using clause - - * Fixes a bug that could lead to memory leak on citus_relation_size - - * Fixes a problem that could cause segmentation fault with recursive planning - - * Switch CI solution to CircleCI - - -- Burak Velioglu Fri, 29 Mar 2019 07:36:09 +0000 - -citus (8.0.3.citus-1) stable; urgency=low - - * Fixes maintenance daemon panic due to unreleased spinlock - - * Fixes an issue with having clause when used with complex joins - - -- Hanefi Onaldi Wed, 9 Jan 2019 9:50:07 +0000 - -citus (8.1.1.citus-1) stable; urgency=low - - * Fixes maintenance daemon panic due to unreleased spinlock - - * Fixes an issue with having clause when used with complex joins - - -- Hanefi Onaldi Mon, 7 Jan 2019 16:26:13 +0000 - -citus (8.1.0.citus-1) stable; urgency=low - - * Turns on ssl by default for new installations of citus - - * Restricts SSL Ciphers to TLS1.2 and above - - * Adds support for INSERT INTO SELECT..ON CONFLICT/RETURNING via coordinator - - * Adds support for round-robin task assignment for queries to reference tables - - * Adds support for SQL tasks using worker_execute_sql_task UDF with task-tracker - - * Adds support for VALIDATE CONSTRAINT queries - - * Adds support for disabling hash aggregate with HLL - - * Adds user ID suffix to intermediate files generated by task-tracker - - * Only allow transmit from pgsql_job_cache directory - - * Disallows GROUPING SET clauses in subqueries - - * Removes restriction on user-defined group ID in node addition functions - - * Relaxes multi-shard modify locks when enable_deadlock_prevention is disabled - - * Improves security in task-tracker protocol - - * Improves permission checks in internal DROP TABLE functions - - * Improves permission checks in cluster management functions - - * Cleans up UDFs and fixes permission checks - - * Fixes crashes caused by stack size increase under high memory load - - * Fixes a bug that could cause maintenance daemon panic - - -- Burak Velioglu Tue, 18 Dec 2018 15:12:45 +0000 - -citus (8.0.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause maintenance daemon panic - - * Fixes crashes caused by stack size increase under high memory load - - -- Burak Velioglu Thu, 13 Dec 2018 13:56:44 +0000 - -citus (7.5.4.citus-1) stable; 
urgency=low - - * Fixes a bug that could cause maintenance daemon panic - - -- Burak Velioglu Wed, 12 Dec 2018 11:45:24 +0000 - -citus (8.0.1.citus-1) stable; urgency=low - - * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker - - -- Burak Velioglu Wed, 28 Nov 2018 11:38:47 +0000 - -citus (7.5.3.citus-1) stable; urgency=low - - * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker - - -- Burak Velioglu Wed, 28 Nov 2018 10:52:20 +0000 - -citus (7.5.2.citus-1) stable; urgency=low - - * Fixes inconsistent metadata error when shard metadata caching get interrupted - - * Fixes a bug that could cause memory leak - - * Fixes a bug that prevents recovering wrong transactions in MX - - * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load - - * Fixes crashes caused by stack size increase under high memory load - - -- Burak Velioglu Wed, 14 Nov 2018 20:42:16 +0000 - -citus (8.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 11 - - * Adds support for applying DML operations on reference tables from MX nodes - - * Adds distributed locking to truncated MX tables - - * Adds support for running TRUNCATE command from MX worker nodes - - * Adds views to provide insight about the distributed transactions - - * Adds support for TABLESAMPLE in router queries - - * Adds support for INCLUDE option in index creation - - * Adds option to allow simple DML commands from hot standby - - * Adds support for partitioned tables with replication factor > 1 - - * Prevents a deadlock on concurrent DROP TABLE and SELECT on Citus MX - - * Fixes a bug that prevents recovering wrong transactions in MX - - * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load - - * Fixes a bug in MX mode, calling DROP SCHEMA with existing partitioned table - - * Fixes a bug that could cause modifying CTEs to select wrong execution mode - - * Fixes a bug preventing rollback in CREATE PROCEDURE - - * Fixes a bug on not being able to drop index on a partitioned table - - * Fixes a bug on TRUNCATE when there is a foreign key to a reference table - - * Fixes a performance issue in prepared INSERT..SELECT - - * Fixes a bug which causes errors on DROP DATABASE IF EXISTS - - * Fixes a bug to remove intermediate result directory in pull-push execution - - * Improves query pushdown planning performance - - * Evaluate functions anywhere in query - - -- Burak Velioglu Fri, 02 Nov 2018 08:06:42 +0000 - -citus (7.5.1.citus-1) stable; urgency=low - - * Improves query pushdown planning performance - - * Fixes a bug that could cause modifying CTEs to select wrong execution mode - - -- Burak Velioglu Wed, 29 Aug 2018 08:06:42 +0000 - -citus (7.4.2.citus-1) stable; urgency=low - - * Fixes a segfault in real-time executor during online shard move - - -- Mehmet Furkan Sahin Fri, 27 Jul 2018 13:42:27 +0000 - -citus (7.5.0.citus-1) stable; urgency=low - - * Adds foreign key support from hash distributed to reference tables - - * Adds SELECT ... FOR UPDATE support for router plannable queries - - * Adds support for non-partition columns in count distinct - - * Fixes a segfault in real-time executor during online shard move - - * Fixes ALTER TABLE ADD COLUMN constraint check - - * Fixes a bug where INSERT ... SELECT allows one to update dist. 
column - - * Allows DDL commands to be sequentialized via citus.multi_shard_modify_mode - - * Adds support for topn_union_agg and topn_add_agg across shards - - * Adds support for hll_union_agg and hll_add_agg across shards - - * Fixes a bug that might cause shards to have a wrong owner - - * Adds select_opens_transaction_block GUC - - * Adds utils to implement DDLs for policies in future - - * Makes intermediate results to use separate connections - - * Adds a node_conninfo GUC to set outgoing connection settings - - -- Mehmet Furkan Sahin Wed, 25 Jul 2018 9:32:24 +0000 - -citus (6.2.6.citus-1) stable; urgency=low - - * Adds support for respecting enable_hashagg in the master planner - - -- Burak Velioglu Fri, 06 Jul 2018 13:30:08 +0000 - -citus (7.4.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause txns to incorrectly proceed after failure - - * Fixes a bug on INSERT ... SELECT queries in prepared statements - - -- Burak Velioglu Wed, 20 Jun 2018 12:25:30 +0000 - -citus (7.2.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause SELECTs to crash during a rebalance - - -- Burak Velioglu Thu, 17 May 2018 11:51:56 +0000 - -citus (7.4.0.citus-1) stable; urgency=low - - * Adds support for non-pushdownable subqueries and CTEs in UPDATE/DELETE - - * Adds support for pushdownable subqueries and joins in UPDATE/DELETE - - * Adds faster shard pruning for subqueries - - * Adds partitioning support to MX table - - * Adds support for (VACUUM | ANALYZE) VERBOSE - - * Adds support for multiple ANDs in HAVING for pushdown planner - - * Adds support for quotation needy schema names - - * Improves operator check time in physical planner for custom data types - - * Removes broadcast join logic - - * Deprecates large_table_shard_count and master_expire_table_cache() - - * Modifies master_update_node to write-lock shards hosted by node over update - - * DROP TABLE now drops shards as the currrent user instead of the superuser - - * Adds specialised error codes for connection failures - - * Improves error messages on connection failure - - * Fixes issue which prevented multiple citus_table_size calls per query - - * Tests are updated to use create_distributed_table - - -- Burak Velioglu Tue, 15 May 2018 13:01:17 +0000 - -citus (7.3.0.citus-1) stable; urgency=low - - * Adds support for non-colocated joins between subqueries - - * Adds support for window functions that can be pushed down to worker - - * Adds support for modifying CTEs - - * Adds recursive plan for WHERE clause subqueries with recurring FROM clause - - * Adds support for bool_ and bit_ aggregates - - * Adds support for Postgres jsonb and json aggregation functions - - * Adds support for respecting enable_hashagg in the master plan - - * Performance improvements to reduce distributed planning time - - * Fixes a bug on planner when aggregate is used in ORDER BY - - * Fixes a bug on planner when DISTINCT (ON) clause is used with GROUP BY - - * Fixes a planner bug with distinct and aggregate clauses - - * Fixes a bug that opened new connections on each table size function call - - * Fixes a bug canceling backends not involved in distributed deadlocks - - * Fixes count distinct bug on column expressions when used with subqueries - - * Improves error handling on worker node failures - - * Improves error messages for INSERT queries that have subqueries - - -- Burak Velioglu Thu, 15 Mar 2018 14:16:10 +0000 - -citus (7.2.1.citus-1) stable; urgency=low - - * Fixes count distinct bug on column expressions when used with 
subqueries - - * Adds support for respecting enable_hashagg in the master plan - - * Fixes a bug canceling backends not involved in distributed deadlocks - - -- Burak Velioglu Tue, 06 Feb 2018 14:46:07 +0000 - -citus (7.2.0.citus-1) stable; urgency=low - - * Adds support for CTEs - - * Adds support for subqueries that require merge step - - * Adds support for set operations (UNION, INTERSECT, ...) - - * Adds support for 2PC auto-recovery - - * Adds support for querying local tables in CTEs and subqueries - - * Adds support for more SQL coverage in subqueries for reference tables - - * Adds support for count(distinct) in queries with a subquery - - * Adds support for non-equijoins when there is already an equijoin - - * Adds support for real-time executor to run in transaction blocks - - * Adds infrastructure for storing intermediate distributed query results - - * Adds a new GUC named enable_repartition_joins for auto executor switch - - * Adds support for limiting the intermediate result size - - * Improves support for queries with unions containing filters - - * Improves support for queries with unions containing joins - - * Improves support for subqueries in the WHERE clause - - * Increases COPY throughput - - * Enables pushing down queries containing only recurring tuples and GROUP BY - - * Load-balance queries that read from 0 shards - - * Improves support for using functions in subqueries - - * Fixes a bug that causing real-time executor to crash during cancellation - - * Fixes a bug that causing real-time executor to get stuck on cancellation - - * Fixes a bug that could block modification queries unnecessarily - - * Fixes a bug that could cause assigning wrong IDs to transactions - - * Fixes a bug that could cause an assert failure with ANALYZE statements - - * Fixes a bug that would push down wrong set operations in subqueries - - * Fixes a bug that could cause a deadlock in create_distributed_table - - * Fixes a bug that could confuse user about ANALYZE usage - - * Fixes a bug causing false positive distributed deadlock detections - - * Relaxes the locking for DDL commands on partitioned tables - - * Relaxes the locking on COPY with replication - - * Logs more remote commands when citus.log_remote_commands is set - - -- Burak Velioglu Tue, 16 Jan 2018 14:34:20 +0000 - -citus (6.2.5.citus-1) stable; urgency=low - - * Fixes a bug that could crash the coordinator while reporting a remote error - - -- Burak Velioglu Thu, 11 Jan 2018 11:40:28 +0000 - -citus (7.1.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause assigning wrong IDs to transactions - - * Increases COPY throughput - - -- Burak Velioglu Fri, 05 Jan 2018 09:00:07 +0000 - -citus (7.1.1.citus-1) stable; urgency=low - - * Fixes a bug preventing pushing down subqueries with reference tables - - * Fixes a bug that could create false positive distributed deadlocks - - * Fixes a bug that could prevent running concurrent COPY and multi-shard DDL - - * Fixes a bug that could mislead users about ANALYZE queries - - -- Burak Velioglu Tue, 05 Dec 2017 09:00:07 +0000 - -citus (7.1.0.citus-1) stable; urgency=low - - * Adds support for native queries with multi shard UPDATE/DELETE queries - - * Expands reference table support in subquery pushdown - - * Adds window function support for subqueries and INSERT ... 
SELECT queries - - * Adds support for COUNT(DISTINCT) [ON] queries on non-partition columns - - * Adds support for DISTINCT [ON] queries on non-partition columns - - * Introduces basic usage statistic collector - - * Adds support for setting replica identity while creating distributed tables - - * Adds support for ALTER TABLE ... REPLICA IDENTITY queries - - * Adds pushdown support for LIMIT and HAVING grouped by partition key - - * Adds support for INSERT ... SELECT queries via worker nodes on MX clusters - - * Adds support for adding primary key using already defined index - - * Adds replication parameter to shard copy functions - - * Changes shard_name UDF to omit public schema name - - * Adds master_move_node UDF to make changes on nodename/nodeport more easy - - * Fixes a bug that could cause casting error with INSERT ... SELECT queries - - * Fixes a bug that could prevent upgrading servers from Citus 6.1 - - * Fixes a bug that could prevent attaching partitions to a table in schema - - * Fixes a bug preventing adding nodes to clusters with reference tables - - * Fixes a bug that could cause a crash with INSERT ... SELECT queries - - * Fixes a bug that could prevent creating a partitoned table on Cloud - - * Implements various performance improvements - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Velioglu Wed, 15 Nov 2017 09:00:07 +0000 - -citus (7.0.3.citus-1) stable; urgency=low - - * Fixes several bugs that could cause crash - - * Fixes a bug that could cause deadlock while creating reference tables - - * Fixes a bug that could cause false-positives in deadlock detection - - * Fixes a bug that could cause 2PC recovery not to work from MX workers - - * Fixes a bug that could cause cache incohorency - - * Fixes a bug that could cause maintenance daemon to skip cache invalidations - - * Improves performance of transaction recovery by using correct index - - -- Burak Yucesoy Mon, 16 Oct 2017 11:52:07 +0000 - -citus (7.0.2.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 22:29:01 +0000 - -citus (6.2.4.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 21:31:35 +0000 - -citus (6.1.3.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 20:31:35 +0000 - -citus (7.0.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause memory leaks in INSERT ... SELECT queries - - * Fixes a bug that could cause incorrect execution of prepared statements - - * Fixes a bug that could cause excessive memory usage during COPY - - * Incorporates latest changes from core PostgreSQL code - - -- Burak Yucesoy Tue, 12 Sep 2017 17:53:50 +0000 - -citus (7.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 10 - - * Drops support for PostgreSQL 9.5 - - * Adds support for multi-row INSERT - - * Adds support for router UPDATE and DELETE queries with subqueries - - * Adds infrastructure for distributed deadlock detection - - * Deprecates enable_deadlock_prevention flag - - * Adds support for partitioned tables - - * Adds support for creating UNLOGGED tables - - * Adds support for SAVEPOINT - - * Adds UDF citus_create_restore_point for taking distributed snapshots - - * Adds support for evaluating non-pushable INSERT ... 
SELECT queries - - * Adds support for subquery pushdown on reference tables - - * Adds shard pruning support for IN and ANY - - * Adds support for UPDATE and DELETE commands that prune down to 0 shard - - * Enhances transaction support by relaxing some transaction restrictions - - * Fixes a bug causing crash if distributed table has no shards - - * Fixes a bug causing crash when removing inactive node - - * Fixes a bug causing failure during COPY on tables with dropped columns - - * Fixes a bug causing failure during DROP EXTENSION - - * Fixes a bug preventing executing VACUUM and INSERT concurrently - - * Fixes a bug in prepared INSERT statements containing an implicit cast - - * Fixes several issues related to statement cancellations and connections - - * Fixes several 2PC related issues - - * Removes an unnecessary dependency causing warning messages in pg_dump - - * Adds internal infrastructure for follower clusters - - * Adds internal infrastructure for progress tracking - - * Implements various performance improvements - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Mon, 28 Aug 2017 12:27:53 +0000 - -citus (6.2.3.citus-1) stable; urgency=low - - * Fixes a crash during execution of local CREATE INDEX CONCURRENTLY - - * Fixes a bug preventing usage of quoted column names in COPY - - * Fixes a bug in prepared INSERTs with implicit cast in partition column - - * Relaxes locks in VACUUM to ensure concurrent execution with INSERT - - -- Burak Yucesoy Thu, 13 Jul 2017 11:27:17 +0000 - -citus (6.2.2.citus-1) stable; urgency=low - - * Fixes a common cause of deadlocks when repairing tables with foreign keys - - -- Burak Velioglu Wed, 07 Jun 2017 09:42:17 +0000 - -citus (6.1.2.citus-1) stable; urgency=low - - * Fixes a common cause of deadlocks when repairing tables with foreign keys - - -- Jason Petersen Wed, 31 May 2017 16:14:11 +0000 - -citus (6.2.1.citus-1) stable; urgency=low - - * Relaxes version-check logic to avoid breaking non-distributed commands - - -- Jason Petersen Wed, 24 May 2017 22:36:07 +0000 - -citus (6.2.0.citus-1) stable; urgency=low - - * Increases SQL subquery coverage by pushing down more kinds of queries - - * Adds CustomScan API support to allow read-only transactions - - * Adds support for CREATE/DROP INDEX CONCURRENTLY - - * Adds support for ALTER TABLE ... ADD CONSTRAINT - - * Adds support for ALTER TABLE ... 
RENAME COLUMN - - * Adds support for DISABLE/ENABLE TRIGGER ALL - - * Adds support for expressions in the partition column in INSERTs - - * Adds support for query parameters in combination with function evaluation - - * Adds support for creating distributed tables from non-empty local tables - - * Adds UDFs to get size of distributed tables - - * Adds UDFs to add a new node without replicating reference tables - - * Adds checks to prevent running Citus binaries with wrong metadata tables - - * Improves shard pruning performance for range queries - - * Improves planner performance for joins involving co-located tables - - * Improves shard copy performance by creating indexes after copy - - * Improves task-tracker performance by batching several status checks - - * Enables router planner for queries on range partitioned table - - * Changes TRUNCATE to drop local data only if enable_ddl_propagation is off - - * Starts to execute DDL on coordinator before workers - - * Fixes a bug causing incorrectly reading invalidated cache - - * Fixes a bug related to creation of schemas in workers with incorrect owner - - * Fixes a bug related to concurrent run of shard drop functions - - * Fixes a bug related to EXPLAIN ANALYZE with DML queries - - * Fixes a bug related to SQL functions in FROM clause - - * Adds a GUC variable to report cross shard queries - - * Fixes a bug related to partition columns without native hash function - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Tue, 16 May 2017 16:05:22 +0000 - -citus (6.1.1.citus-1) stable; urgency=low - - * Fixes a crash caused by router executor use after connection timeouts - - * Fixes a crash caused by relation cache invalidation during COPY - - * Fixes bug related to DDL use within PL/pgSQL functions - - * Fixes a COPY bug related to types lacking binary output functions - - * Fixes a bug related to modifications with parameterized partition values - - * Fixes improper value interpolation in worker sequence generation - - * Guards shard pruning logic against zero-shard tables - - * Fixes possible NULL pointer dereference and buffer underflow, via PVS-Studio - - * Fixes a INSERT...SELECT bug that could push down non-partition column JOINs - - -- Metin Doslu Fri, 5 May 2017 17:42:00 +0000 - -citus (6.1.0.citus-1) stable; urgency=low - - * Implements reference tables, transactionally replicated to all nodes - - * Adds upgrade_to_reference_table UDF to upgrade pre-6.1 reference tables - - * Expands prepared statement support to nearly all statements - - * Adds support for creating VIEWs which reference distributed tables - - * Adds targeted VACUUM/ANALYZE support - - * Adds support for the FILTER clause in aggregate expressions - - * Adds support for function evaluation within INSERT INTO ... 
SELECT - - * Adds support for creating foreign key constraints with ALTER TABLE - - * Adds logic to choose router planner for all queries it supports - - * Enhances create_distributed_table with parameter for explicit colocation - - * Adds generally useful utility UDFs previously available as "Citus Tools" - - * Adds user-facing UDFs for locking shard resources and metadata - - * Refactors connection and transaction management for more consistency - - * Enhances COPY with fully transactional semantics - - * Improves support for cancellation for a number of queries and commands - - * Adds column_to_column_name UDF to help users understand partkey values - - * Adds master_disable_node UDF for temporarily disabling nodes - - * Adds proper MX ("masterless") metadata propagation logic - - * Adds start_metadata_sync_to_node UDF to propagate metadata changes to nodes - - * Enhances SERIAL compatibility with MX tables - - * Adds an node_connection_timeout parameter to set node connection timeouts - - * Adds enable_deadlock_prevention setting to permit multi-node transactions - - * Adds a replication_model setting to specify replication of new tables - - * Changes the shard_replication_factor setting's default value to one - - * Adds code to automatically set max_prepared_transactions if not configured - - * Accelerates lookup of colocated shard placements - - * Fixes a bug affecting INSERT INTO ... SELECT queries using constant values - - * Fixes a bug by ensuring COPY does not mark placements inactive - - * Fixes a bug affecting reads from pg_dist_shard_placement table - - * Fixes a crash triggered by creating a foreign key without a column - - * Fixes a crash related to accessing catalog tables after aborted transaction - - * Fixes a bug affecting JOIN queries requiring repartitions - - * Fixes a bug affecting node insertions to pg_dist_node table - - * Fixes a crash triggered by queries with modifying common table expressions - - * Fixes a bug affecting workloads with concurrent shard appends and deletions - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Thu, 9 Feb 2017 16:17:41 +0000 - -citus (6.0.1.citus-3) stable; urgency=low - - * First build using new versioning practices - - -- Jason Petersen Wed, 8 Feb 2017 23:19:46 +0000 - -citus (6.0.1.citus-2) stable; urgency=low - - * Transitional package to guide users to new package name - - -- Jason Petersen Mon, 6 Feb 2017 16:33:44 +0000 - -citus (6.0.1.citus-1) stable; urgency=low - - * Fixes a bug causing failures during pg_upgrade - - * Fixes a bug preventing DML queries during colocated table creation - - * Fixes a bug that caused NULL parameters to be incorrectly passed as text - - -- Burak Yucesoy Wed, 30 Nov 2016 15:27:38 +0000 - -citus (6.0.0.citus-1) stable; urgency=low - - * Adds compatibility with PostgreSQL 9.6, now the recommended version - - * Removes the pg_worker_list.conf file in favor of a pg_dist_node table - - * Adds master_add_node and master_add_node UDFs to manage membership - - * Removes the \stage command and corresponding csql binary in favor of COPY - - * Removes copy_to_distributed_table in favor of first-class COPY support - - * Adds support for multiple DDL statements within a transaction - - * Adds support for certain foreign key constraints - - * Adds support for parallel INSERT INTO ... 
SELECT against colocated tables - - * Adds support for the TRUNCATE command - - * Adds support for HAVING clauses in SELECT queries - - * Adds support for EXCLUDE constraints which include the partition column - - * Adds support for system columns in queries (tableoid, ctid, etc.) - - * Adds support for relation name extension within INDEX definitions - - * Adds support for no-op UPDATEs of the partition column - - * Adds several general-purpose utility UDFs to aid in Citus maintenance - - * Adds master_expire_table_cache UDF to forcibly expire cached shards - - * Parallelizes the processing of DDL commands which affect distributed tables - - * Adds support for repartition jobs using composite or custom types - - * Enhances object name extension to handle long names and large shard counts - - * Parallelizes the master_modify_multiple_shards UDF - - * Changes distributed table creation to error if target table is not empty - - * Changes the pg_dist_shard.logicalrelid column from an oid to regclass - - * Adds a placementid column to pg_dist_shard_placement, replacing Oid use - - * Removes the pg_dist_shard.shardalias distribution metadata column - - * Adds pg_dist_partition.repmodel to track tables using streaming replication - - * Adds internal infrastructure to take snapshots of distribution metadata - - * Addresses the need to invalidate prepared statements on metadata changes - - * Adds a mark_tables_colocated UDF for denoting pre-6.0 manual colocation - - * Fixes a bug affecting prepared statement execution within PL/pgSQL - - * Fixes a bug affecting COPY commands using composite types - - * Fixes a bug that could cause crashes during EXPLAIN EXECUTE - - * Separates worker and master job temporary folders - - * Eliminates race condition between distributed modification and repair - - * Relaxes the requirement that shard repairs also repair colocated shards - - * Implements internal functions to track which tables' shards are colocated - - * Adds pg_dist_partition.colocationid to track colocation group membership - - * Extends shard copy and move operations to respect colocation settings - - * Adds pg_dist_local_group to prepare for future MX-related changes - - * Adds create_distributed_table to easily create shards and infer colocation - - -- Jason Petersen Tue, 8 Nov 2016 19:45:45 +0000 - -citus (5.2.2.citus-1) stable; urgency=low - - * Adds support for IF NOT EXISTS clause of CREATE INDEX command - - * Adds support for RETURN QUERY and FOR ... 
IN PL/pgSQL features - - * Extends the router planner to handle more queries - - * Changes COUNT of zero-row sets to return 0 rather than an empty result - - * Reduces the minimum permitted task_tracker_delay to a single millisecond - - * Fixes a bug that caused crashes during joins with a WHERE false clause - - * Fixes a bug triggered by unique violation errors raised in long txns - - * Fixes a bug resulting in multiple registration of transaction callbacks - - * Fixes a bug which could result in stale reads of distribution metadata - - * Fixes a bug preventing distributed modifications in some PL/pgSQL functions - - * Fixes some code paths that could hypothetically read uninitialized memory - - * Lowers log level of "waiting for activity" messages - - -- Jason Petersen Tue, 8 Nov 2016 18:43:37 +0000 - -citus (5.2.1.citus-1) stable; urgency=low - - * Fixes subquery pushdown to properly extract outer join qualifiers - - * Addresses possible memory leak during multi-shard transactions - - -- Jason Petersen Tue, 6 Sep 2016 20:47:15 +0000 - -citus (5.2.0.citus-1) stable; urgency=low - - * Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required - - * Adds schema support for tables, named objects (types, operators, etc.) - - * Evaluates non-immutable functions on master in all modification commands - - * Adds support for SERIAL types in non-partition columns - - * Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands - - * Adds support for multi-statement transactions using a fixed set of nodes - - * Full SQL support for SELECT queries which can be executed on single worker - - * Adds option to perform DDL changes using prepared transactions (2PC) - - * Adds an enable_ddl_propagation parameter to control DDL propagation - - * Accelerates shard pruning during merges - - * Adds master_modify_multiple_shards UDF to modify many shards at once - - * Adds COPY support for arrays of user-defined types - - * Now supports parameterized prepared statements for certain use cases - - * Extends LIMIT/OFFSET support to all executor types - - * Constraint violations now fail fast rather than hitting all placements - - * Makes master_create_empty_shard aware of shard placement policy - - * Reduces unnecessary sleep during queries processed by real-time executor - - * Improves task tracker executor's task cleanup logic - - * Relaxes restrictions on cancellation of DDL commands - - * Removes ONLY keyword from worker SELECT queries - - * Error message improvements and standardization - - * Moves master_update_shard_statistics function to pg_catalog schema - - * Fixes a bug where hash-partitioned anti-joins could return bad results - - * Now sets storage type correctly for foreign table-backed shards - - * Fixes master_update_shard_statistics issue with hash-partitioned tables - - * Fixes an issue related to extending table names that require escaping - - * Reduces risk of row counter overflows during modifications - - * Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries - - * Fixes crashes related to partition columns with high attribute numbers - - * Fixes certain subquery and join crashes - - * Detects flex for build even if PostgreSQL was built without it - - * Fixes assert-enabled crash when all_modifications_commutative is true - - -- Jason Petersen Wed, 17 Aug 2016 10:23:21 +0000 - -citus (5.2.0~rc.1-1) testing; urgency=low - - * Release candidate for 5.2. 
- - -- Jason Petersen Mon, 01 Aug 2016 17:01:05 +0000 - -citus (5.1.1-1) stable; urgency=low - - * Adds complex count distinct expression support in repartitioned subqueries - - * Improves task tracker job cleanup logic, addressing a memory leak - - * Fixes bug that generated incorrect results for LEFT JOIN queries - - * Improves compatibility with Debian's reproducible builds project - - * Fixes build issues on FreeBSD platforms - - -- Jason Petersen Fri, 17 Jun 2016 16:20:15 +0000 - -citus (5.1.0-1) stable; urgency=low - - * Adds distributed COPY to rapidly populate distributed tables - - * Adds support for using EXPLAIN on distributed queries - - * Recognizes and fast-paths single-shard SELECT statements automatically - - * Increases INSERT throughput via shard pruning optimizations - - * Improves planner performance for joins involving tables with many shards - - * Adds ability to pass columns as arguments to function calls in UPDATEs - - * Introduces transaction manager for use by multi-shard commands - - * Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables - - * Adds support for some UNIQUE indexes on hash- or range-partitioned tables - - * Deprecates \stage in favor of using COPY for append-partition tables - - * Deprecates copy_to_distributed_table in favor of first-class COPY support - - * Fixes build problems when using non-packaged PostgreSQL installs - - * Fixes bug that sometimes skipped pruning when partitioned by VARCHAR column - - * Fixes bug impeding use of user functions in repartitioned subqueries - - * Fixes bug involving queries with equality comparisons of boolean types - - * Fixes crash that prevented use alongside pg_stat_statements - - * Fixes crash arising from SELECT queries that lack a target list - - * Improves warning and error messages - - -- Jason Petersen Tue, 17 May 2016 16:55:02 +0000 - -citus (5.1.0~rc.2-1) testing; urgency=low - - * Fix EXPLAIN output when FORMAT JSON in use - - -- Jason Petersen Mon, 16 May 2016 11:08:09 +0000 - -citus (5.1.0~rc.1-1) testing; urgency=low - - * Release candidate for 5.1. 
-
- -- Jason Petersen Wed, 04 May 2016 19:26:23 +0000
-
-citus (5.0.1-1) stable; urgency=low
-
- * Fixes issues on 32-bit systems
-
- -- Jason Petersen Fri, 15 Apr 2016 19:17:35 +0000
-
-citus (5.0.0-1) stable; urgency=low
-
- * Initial release
-
- -- Jason Petersen Thu, 24 Mar 2016 10:12:52 -0400
+citus (10.1.4.citus-1) stable; urgency=low
+
+ * Official 10.1.4 release of Citus
+
+ -- Gurkan Indibay Tue, 01 Feb 2022 11:49:11 +0000
+
+citus (10.2.3.citus-1) stable; urgency=low
+
+ * Official 10.2.3 release of Citus
+
+ -- Gurkan Indibay Mon, 29 Nov 2021 11:00:41 +0000
+
+citus (10.0.6.citus-1) stable; urgency=low
+
+ * Official 10.0.6 release of Citus
+
+ -- Gurkan Indibay Fri, 12 Nov 2021 11:37:25 +0000
+
+citus (9.5.10.citus-1) stable; urgency=low
+
+ * Official 9.5.10 release of Citus
+
+ -- Gurkan Indibay Mon, 08 Nov 2021 14:18:56 +0000
+
+citus (9.2.8.citus-1) stable; urgency=low
+
+ * Official 9.2.8 release of Citus
+
+ -- Gurkan Indibay Thu, 04 Nov 2021 12:45:09 +0000
+
+citus (9.2.7.citus-1) stable; urgency=low
+
+ * Official 9.2.7 release of Citus
+
+ -- Gurkan Indibay Wed, 03 Nov 2021 09:34:30 +0000
+
+citus (10.2.2.citus-1) stable; urgency=low
+
+ * Official 10.2.2 release of Citus
+
+ -- Gurkan Indibay Thu, 14 Oct 2021 13:00:27 +0000
+
+citus (10.2.1.citus-1) stable; urgency=low
+
+ * Adds missing version-mismatch checks for columnar tables
+
+ * Adds missing version-mismatch checks for internal functions
+
+ * Fixes a bug that could cause partition shards being not co-located with
+   parent shards
+
+ * Fixes a bug that prevents pushing down boolean expressions when using
+   columnar custom scan
+
+ * Fixes a clog lookup failure that could occur when writing to a columnar
+   table
+
+ * Fixes an issue that could cause unexpected errors when there is an
+   in-progress write to a columnar table
+
+ * Revokes read access to `columnar.chunk` from unprivileged user
+
+ -- Gurkan Indibay Fri, 24 Sep 2021 12:03:35 +0000
+
+citus (10.1.3.citus-1) stable; urgency=low
+
+ * Official 10.1.3 release of Citus
+
+ -- Gurkan Indibay Fri, 17 Sep 2021 17:23:05 +0000
+
+citus (10.2.0.citus-1) stable; urgency=low
+
+ * Adds PostgreSQL 14 support
+
+ * Adds hash & btree index support for columnar tables
+
+ * Adds helper UDFs for easy time partition management:
+   `get_missing_time_partition_ranges`, `create_time_partitions`, and
+   `drop_old_time_partitions`
+
+ * Adds propagation of ALTER SEQUENCE
+
+ * Adds support for ALTER INDEX ATTACH PARTITION
+
+ * Adds support for CREATE INDEX ON ONLY
+
+ * Allows more graceful failovers when replication factor > 1
+
+ * Enables chunk group filtering to work with Params for columnar tables
+
+ * Enables qual push down for joins including columnar tables
+
+ * Enables transferring of data using binary encoding by default on PG14
+
+ * Improves `master_update_table_statistics` and provides distributed deadlock
+   detection
+
+ * Includes `data_type` and `cache` in sequence definition on worker
+
+ * Makes start/stop_metadata_sync_to_node() transactional
+
+ * Makes sure that table exists before updating table statistics
+
+ * Prevents errors with concurrent `citus_update_table_statistics` and DROP table
+
+ * Reduces memory usage of columnar table scans by freeing the memory used for
+   last stripe read
+
+ * Shows projected columns for columnar tables in EXPLAIN output
+
+ * Speeds up dropping partitioned tables
+
+ * Synchronizes hasmetadata flag on mx workers
+
+ * Uses current user while syncing metadata
+
+ * Adds a parameter to cleanup metadata when metadata
syncing is stopped + + * Fixes a bug about int and smallint sequences on MX + + * Fixes a bug that causes partitions to have wrong distribution key after + DROP COLUMN + + * Fixes a bug that caused `worker_append_table_to_shard` to write as superuser + + * Fixes a bug that caused `worker_create_or_alter_role` to crash with NULL input + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that may cause crash while aborting transaction + + * Fixes a bug that prevents attaching partitions when colocated foreign key + exists + + * Fixes a bug with `nextval('seq_name'::text)` + + * Fixes a crash in shard rebalancer when no distributed tables exist + + * Fixes a segfault caused by use after free when using a cached connection + + * Fixes a UNION pushdown issue + + * Fixes a use after free issue that could happen when altering a distributed + table + + * Fixes showing target shard size in the rebalance progress monitor + + -- Gurkan Indibay Thu, 16 Sep 2021 08:45:17 +0000 + +citus (10.1.2.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + -- Gurkan Indibay Tue, 17 Aug 2021 16:25:14 +0000 + +citus (10.0.5.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Tue, 17 Aug 2021 14:42:54 +0000 + +citus (9.5.7.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + * Improves master_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Tue, 17 Aug 2021 13:18:11 +0000 + +citus (9.4.6.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Improves master_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Wed, 11 Aug 2021 08:57:04 +0000 + +citus (10.1.1.citus-1) stable; urgency=low + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + * Fixes showing target shard size in the rebalance progress monitor + + -- Gurkan Indibay Fri, 06 Aug 2021 08:38:37 +0000 + +citus (10.1.0.citus-1) stable; urgency=low + + * Drops support for PostgreSQL 11 + + * Adds `shard_count` parameter to `create_distributed_table` function + + * Adds support for `ALTER DATABASE OWNER` + + * Adds support for temporary columnar tables + + * Adds support for using sequences as column default values when syncing + metadata + + * `alter_columnar_table_set` enforces columnar table option constraints + + * Continues to remove shards after failure in `DropMarkedShards` + + * Deprecates the `citus.replication_model` GUC + + * Enables `citus.defer_drop_after_shard_move` by default + + * Ensures free disk space before moving a shard + + * Fetches shard size on the fly for the rebalance monitor + + * Ignores old placements when disabling or removing a node + + * Implements `improvement_threshold` at shard rebalancer moves + + * Improves orphaned shard cleanup logic + + * Improves performance of `citus_shards` + + * Introduces `citus.local_hostname` GUC for connections to the current node + + * Makes sure connection is
closed after each shard move + + * Makes sure that target node in shard moves is eligible for shard move + + * Optimizes partitioned disk size calculation for shard rebalancer + + * Prevents connection errors by properly terminating connections + + * Prevents inheriting a distributed table + + * Prevents users from dropping & truncating known shards + + * Pushes down `VALUES` clause as long as not in outer part of a `JOIN` + + * Reduces memory usage for multi-row inserts + + * Reduces memory usage while rebalancing shards + + * Removes length limits around partition names + + * Removes dependencies on the existence of public schema + + * Executor avoids opening extra connections + + * Excludes orphaned shards while finding shard placements + + * Preserves access method of materialized views when undistributing + or altering distributed tables + + * Fixes a bug that allowed moving of shards belonging to a reference table + + * Fixes a bug that can cause a crash when DEBUG4 logging is enabled + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that causes worker_create_or_alter_role to crash with NULL input + + * Fixes a bug where foreign key to reference table was disallowed + + * Fixes a bug with local cached plans on tables with dropped columns + + * Fixes data race in `get_rebalance_progress` + + * Fixes `FROM ONLY` queries on partitioned tables + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + * Fixes error message for local table joins + + * Fixes issues caused by omitting public schema in queries + + * Fixes nested `SELECT` query with `UNION` bug + + * Fixes null relationName bug at parallel execution + + * Fixes possible segfaults when using Citus in the middle of an upgrade + + * Fixes problems with concurrent calls of `DropMarkedShards` + + * Fixes shared dependencies that are not resident in a database + + * Fixes stale hostnames bug in prepared statements after `master_update_node` + + * Fixes the relation size bug during rebalancing + + * Fixes two race conditions in the get_rebalance_progress + + * Fixes using 2PC when it might be necessary + + -- Gurkan Indibay Fri, 16 Jul 2021 15:37:21 +0000 + +citus (10.0.4.citus-1) stable; urgency=low + + * Introduces `citus.local_hostname` GUC for connections to the current node + + * Removes dependencies on the existence of public schema + + * Removes limits around long partition names + + * Fixes a bug that can cause a crash when DEBUG4 logging is enabled + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + * Fixes FROM ONLY queries on partitioned tables + + * Fixes issues caused by public schema being omitted in queries + + * Fixes problems with concurrent calls of DropMarkedShards + + * Fixes relname null bug when using parallel execution + + * Fixes two race conditions in the get_rebalance_progress + + -- Gurkan Indibay Fri, 16 Jul 2021 10:58:55 +0000 + +citus (9.5.6.citus-1) stable; urgency=low + + * Fixes minor bug in `citus_prepare_pg_upgrade` that caused it to lose its + idempotency + + -- Gurkan Fri, 09 Jul 2021 13:30:42 +0000 + +citus (9.4.5.citus-1) stable; urgency=low + + * Adds a configure flag to enforce security + + * Avoids re-using connections for intermediate results + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that might cause self-deadlocks when COPY used in TX block + + * Fixes an issue 
that could cause citus_finish_pg_upgrade to fail + + -- Gurkan Thu, 08 Jul 2021 10:15:23 +0000 + +citus (10.0.3.citus-1) stable; urgency=low + + * Prevents infinite recursion for queries that involve UNION ALL + below `JOIN` + + * Fixes a crash in queries with a modifying CTE and a SELECT + without `FROM` + + * Fixes upgrade and downgrade paths for citus_update_table_statistics + + * Fixes a bug that causes SELECT queries to use 2PC unnecessarily + + * Fixes a bug that might cause self-deadlocks with + `CREATE INDEX` / `REINDEX CONCURRENTLY` commands + + * Adds citus.max_cached_connection_lifetime GUC to set maximum connection + lifetime + + * Adds citus.remote_copy_flush_threshold GUC that controls + per-shard memory usages by `COPY` + + * Adds citus_get_active_worker_nodes UDF to deprecate + `master_get_active_worker_nodes` + + * Skips 2PC for readonly connections in a transaction + + * Makes sure that local execution starts coordinated transaction + + * Removes open temporary file warning when cancelling a query with + an open tuple store + + * Relaxes the locks when adding an existing node + + -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 + +citus (10.0.2.citus-1) stable; urgency=low + + * Adds a configure flag to enforce security + + * Fixes a bug due to cross join without target list + + * Fixes a bug with UNION ALL on PG 13 + + * Fixes a compatibility issue with pg_audit in utility calls + + * Fixes insert query with CTEs/sublinks/subqueries etc + + * Grants SELECT permission on citus_tables view to public + + * Grants SELECT permission on columnar metadata tables to public + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + * Preserves colocation with procedures in alter_distributed_table + + * Prevents using alter_columnar_table_set and alter_columnar_table_reset + on a columnar table not owned by the user + + * Removes limits around long table names + + -- Gurkan Indibay Thu, 4 Mar 2021 2:46:54 +0000 + +citus (9.5.2.citus-1) stable; urgency=low + + * Fixes distributed deadlock detection being blocked by metadata sync + + * Prevents segfaults when SAVEPOINT handling cannot recover from connection + failures + + * Fixes possible issues that might occur with single shard distributed tables + + -- gurkanindibay Wed, 27 Jan 2021 11:25:38 +0000 + +citus (9.4.4.citus-1) stable; urgency=low + + * Fixes a bug that could cause router queries with local tables to be pushed + down + + * Fixes a segfault in connection management due to invalid connection hash + entries + + * Fixes possible issues that might occur with single shard distributed tables + + -- gurkanindibay Tue, 5 Jan 2021 14:58:56 +0000 + +citus (9.5.1.citus-1) stable; urgency=low + + * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE + + * Fixes a bug that could cause excessive memory consumption when a partition is + created + + * Fixes a bug that triggers subplan executions unnecessarily with cursors + + * Fixes a segfault in connection management due to invalid connection hash + entries + + -- Onur Tirtir Wed, 2 Dec 2020 14:28:44 +0000 + +citus (9.4.3.citus-1) stable; urgency=low + + * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE + + * Fixes a bug that triggers subplan executions unnecessarily with cursors + + -- Onur Tirtir Tue, 24 Nov 2020 11:17:57 +0000 + +citus (9.5.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 13 + + * Removes the task-tracker executor + + * Introduces citus local tables + + * Introduces undistribute_table UDF to 
convert tables back to postgres tables + + * Adds support for EXPLAIN (ANALYZE) EXECUTE and EXPLAIN EXECUTE + + * Adds support for EXPLAIN (ANALYZE, WAL) for PG13 + + * Sorts the output of EXPLAIN (ANALYZE) by execution duration. + + * Adds support for CREATE TABLE ... USING table_access_method + + * Adds support for WITH TIES option in SELECT and INSERT SELECT queries + + * Avoids taking multi-shard locks on workers + + * Enforces citus.max_shared_pool_size config in COPY queries + + * Enables custom aggregates with multiple parameters to be executed on workers + + * Enforces citus.max_intermediate_result_size in local execution + + * Improves cost estimation of INSERT SELECT plans + + * Introduces delegation of procedures that read from reference tables + + * Prevents pull-push execution for simple pushdownable subqueries + + * Improves error message when creating a foreign key to a local table + + * Makes citus_prepare_pg_upgrade idempotent by dropping transition tables + + * Disallows ON TRUE outer joins with reference & distributed tables when + reference table is outer relation to avoid incorrect results + + * Disallows field indirection in INSERT/UPDATE queries to avoid incorrect + results + + * Disallows volatile functions in UPDATE subqueries to avoid incorrect results + + * Fixes CREATE INDEX CONCURRENTLY crash with local execution + + * Fixes citus_finish_pg_upgrade to drop all backup tables + + * Fixes a bug that cause failures when RECURSIVE VIEW joined reference table + + * Fixes DROP SEQUENCE failures when metadata syncing is enabled + + * Fixes a bug that caused CREATE TABLE with CHECK constraint to fail + + * Fixes a bug that could cause VACUUM to deadlock + + * Fixes master_update_node failure when no background worker slots are available + + * Fixes a bug that caused replica identity to not be propagated on shard repair + + * Fixes a bug that could cause crashes after connection timeouts + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug that could cause deadlocks on CREATE INDEX + + * Fixes a bug with genetic query optimization in outer joins + + * Fixes a crash when aggregating empty tables + + * Fixes a crash with inserting domain constrained composite types + + * Fixes a crash with multi-row & router INSERT's in local execution + + * Fixes a possibility of doing temporary file cleanup more than once + + * Fixes incorrect setting of join related fields + + * Fixes memory issues around deparsing index commands + + * Fixes reference table access tracking for sequential execution + + * Fixes removal of a single node with only reference tables + + * Fixes sending commands to coordinator when it is added as a worker + + * Fixes write queries with const expressions and COLLATE in various places + + * Fixes wrong cancellation message about distributed deadlock + + -- Onur Tirtir Wed, 11 Nov 2020 15:00:27 +0000 + +citus (9.4.2.citus-1) stable; urgency=low + + * Fixes a bug that could lead to multiple maintenance daemons + + * Fixes an issue preventing views in reference table modifications + + -- Onur Tirtir Thu, 22 Oct 2020 8:53:44 +0000 + +citus (9.4.1.citus-1) stable; urgency=low + + * Fixes EXPLAIN ANALYZE output truncation + + * Fixes a deadlock during transaction recovery + + -- Onur Tirtir Wed, 30 Sep 2020 9:33:46 +0000 + +citus (9.4.0.citus-1) stable; urgency=low + + * Improves COPY by honoring max_adaptive_executor_pool_size config + + * Adds support for insert into local table select from distributed table + + * Adds support to 
partially push down tdigest aggregates + + * Adds support for receiving binary encoded results from workers using + citus.enable_binary_protocol + + * Enables joins between local tables and CTEs + + * Adds showing query text in EXPLAIN output when explain verbose is true + + * Adds support for showing CTE statistics in EXPLAIN ANALYZE + + * Adds support for showing amount of data received in EXPLAIN ANALYZE + + * Introduces downgrade paths in migration scripts + + * Avoids returning incorrect results when changing roles in a transaction + + * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug + + * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug with lists of configuration values in ALTER ROLE SET statements + + * Fixes a bug that occurs when coordinator is added as a worker node + + * Fixes a crash because of overflow in partition id with certain compile flags + + * Fixes a crash that may happen if no worker nodes are added + + * Fixes a crash that occurs when inserting implicitly coerced constants + + * Fixes a crash when aggregating empty tables + + * Fixes a memory leak in subtransaction memory handling + + * Fixes crash when using rollback to savepoint after cancellation of DML + + * Fixes deparsing for queries with anonymous column references + + * Fixes distribution of composite types failing to include typemods + + * Fixes explain analyze on adaptive executor repartitions + + * Fixes possible error throwing in abort handle + + * Fixes segfault when evaluating func calls with default params on coordinator + + * Fixes several EXPLAIN ANALYZE issues + + * Fixes write queries with const expressions and COLLATE in various places + + * Fixes wrong cancellation message about distributed deadlocks + + * Reports correct INSERT/SELECT method in EXPLAIN + + * Disallows triggers on citus tables + + -- Onur Tirtir Tue, 28 Jul 2020 13:22:31 +0000 + +citus (9.3.5.citus-1) stable; urgency=low + + * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug + + * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug + + * Fixes a crash because of overflow in partition id with certain compile flags + + -- Onur Tirtir Mon, 27 Jul 2020 7:28:18 +0000 + +citus (9.3.4.citus-1) stable; urgency=low + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug with lists of configuration values in ALTER ROLE SET statements + + * Fixes deparsing for queries with anonymous column references + + -- Onur Tirtir Wed, 22 Jul 2020 9:00:01 +0000 + +citus (9.3.3.citus-1) stable; urgency=low + + * Fixes a memory leak in subtransaction memory handling + + -- Onur Tirtir Mon, 13 Jul 2020 8:47:40 +0000 + +citus (9.3.0.citus-1) stable; urgency=low + + * Adds max_shared_pool_size to control number of connections across sessions + + * Adds support for window functions on coordinator + + * Improves shard pruning logic to understand OR-conditions + + * Prevents using an extra connection for intermediate result multi-casts + + * Adds propagation of ALTER ROLE .. 
SET statements + + * Adds update_distributed_table_colocation UDF to update colocation of tables + + * Introduces a UDF to truncate local data after distributing a table + + * Adds support for creating temp schemas in parallel + + * Adds support for evaluation of nextval in the target list on coordinator + + * Adds support for local execution of COPY/TRUNCATE/DROP/DDL commands + + * Adds support for local execution of shard creation + + * Uses local execution in a transaction block + + * Adds support for querying distributed table sizes concurrently + + * Allows master_copy_shard_placement to replicate placements to new nodes + + * Allows table type to be used in target list + + * Avoids having multiple maintenance daemons active for a single database + + * Defers reference table replication to shard creation time + + * Enables joins between local tables and reference tables in transaction blocks + + * Ignores pruned target list entries in coordinator plan + + * Improves SIGTERM handling of maintenance daemon + + * Increases the default of citus.node_connection_timeout to 30 seconds + + * Fixes a bug that occurs when creating remote tasks in local execution + + * Fixes a bug that causes some DML queries containing aggregates to fail + + * Fixes a bug that could cause failures in queries with subqueries or CTEs + + * Fixes a bug that may cause some connection failures to throw errors + + * Fixes a bug which caused queries with SRFs and function evaluation to fail + + * Fixes a bug with generated columns when executing COPY dist_table TO file + + * Fixes a crash when using non-constant limit clauses + + * Fixes a failure when composite types used in prepared statements + + * Fixes a possible segfault when dropping dist. table in a transaction block + + * Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING + + * Fixes a segfault when executing queries using GROUPING + + * Fixes an error when using LEFT JOIN with GROUP BY on primary key + + * Fixes an issue with distributing tables having generated cols not at the end + + * Fixes automatic SSL permission issue when using "initdb --allow-group-access" + + * Fixes errors which could occur when subqueries are parameters to aggregates + + * Fixes possible issues by invalidating the plan cache in master_update_node + + * Fixes timing issues which could be caused by changing system clock + + -- Onur Tirtir Thu, 7 May 2020 15:11:25 +0000 + +citus (9.2.4.citus-1) stable; urgency=low + + * Fixes a release problem in 9.2.3 + + -- Onur Tirtir Tue, 31 Mar 2020 08:06:59 +0000 + +citus (9.2.3.citus-1) stable; urgency=low + + * Do not use C functions that have been banned by Microsoft + + * Fixes a bug that causes wrong results with complex outer joins + + * Fixes issues found using static analysis + + * Fixes left join shard pruning in pushdown planner + + * Fixes possibility for segmentation fault in internal aggregate functions + + * Fixes possible segfault when non pushdownable aggregates are used in HAVING + + * Improves correctness of planning subqueries in HAVING + + * Prevents using old connections for security if citus.node_conninfo changed + + * Uses Microsoft approved cipher string for default TLS setup + + -- Onur Tirtir Thu, 26 Mar 2020 8:22:48 +0000 + +citus (9.0.2.citus-1) stable; urgency=low + + * Fixes build errors on EL/OL 6 based distros + + * Fixes a bug that caused maintenance daemon to fail on standby nodes + + * Disallows distributed function creation when replication_model is `statement` + + -- Onur Tirtir
Fri, 6 Mar 2020 14:10:16 +0000 + +citus (9.2.2.citus-1) stable; urgency=low + + * Fixes a bug that caused some prepared stmts with function calls to fail + + * Fixes a bug that caused some prepared stmts with composite types to fail + + * Fixes a bug that caused missing subplan results in workers + + * Improves performance of re-partition joins + + -- Onur Tirtir Fri, 6 Mar 2020 07:14:20 +0000 + +citus (9.2.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause crashes if distribution key is NULL + + -- Onur Tirtir Fri, 14 Feb 2020 11:51:09 +0000 + +citus (9.2.0.citus-1) stable; urgency=low + + * Adds support for INSERT...SELECT queries with re-partitioning + + * Adds citus.coordinator_aggregation_strategy to support more aggregates + + * Adds caching of local plans on shards for Citus MX + + * Adds compatibility support for dist. object infrastructure from old versions + + * Adds deferring shard-pruning for fast-path router queries to execution + + * Adds propagation of GRANT ... ON SCHEMA queries + + * Adds support for CTE pushdown via CTE inlining in distributed planning + + * Adds support for ALTER TABLE ... SET SCHEMA propagation. + + * Adds support for DROP ROUTINE & ALTER ROUTINE commands + + * Adds support for any inner join on a reference table + + * Changes citus.log_remote_commands level to NOTICE + + * Disallows marking ref. table shards unhealthy in the presence of savepoints + + * Disallows placing new shards with shards in TO_DELETE state + + * Enables local execution of queries that do not need any data access + + * Fixes Makefile trying to cleanup PG directory during install + + * Fixes a bug causing errors when planning a query with multiple subqueries + + * Fixes a possible deadlock that could happen during shard moves + + * Fixes a problem when adding a new node due to tables referenced in func body + + * Fixes an issue that could cause joins with reference tables to be slow + + * Fixes cached metadata for shard is inconsistent issue + + * Fixes inserting multiple composite types as partition key in VALUES + + * Fixes unnecessary repartition on joins with more than 4 tables + + * Prevents wrong results for replicated partitioned tables after failure + + * Restricts LIMIT approximation for non-commutative aggregates + + -- Onur Tirtir Wed, 10 Feb 2020 8:48:00 +0000 + +citus (9.1.1.citus-1) stable; urgency=low + + * Fixes a bug causing SQL-executing UDFs to crash when passing in DDL + + * Fixes a bug that caused column_to_column_name to crash for invalid input + + * Fixes a bug that caused inserts into local tables w/ dist.
subqueries to crash + + * Fixes a bug that caused some noop DML statements to fail + + * Fixes a bug that prevents dropping reference table columns + + * Fixes a crash in IN (.., NULL) queries + + * Fixes a crash when calling a distributed function from PL/pgSQL + + * Fixes an issue that caused CTEs to sometimes leak connections + + * Fixes strange errors in DML with unreachable sublinks + + * Prevents statements in SQL functions to run outside of a transaction + + -- Onur Tirtir Wed, 18 Dec 2019 14:32:42 +0000 + +citus (9.1.0.citus-1) stable; urgency=low + + * Adds extensions to distributed object propagation infrastructure + + * Adds support for ALTER ROLE propagation + + * Adds support for aggregates in create_distributed_function + + * Adds support for expressions in reference joins + + * Adds support for returning RECORD in multi-shard queries + + * Adds support for simple IN subqueries on unique cols in repartition joins + + * Adds support for subqueries in HAVING clauses + + * Automatically distributes unary aggs w/ combinefunc and non-internal stype + + * Disallows distributed func creation when replication_model is 'statement' + + * Drops support for deprecated real-time and router executors + + * Fixes a bug in local execution that could cause missing rows in RETURNING + + * Fixes a bug that caused maintenance daemon to fail on standby nodes + + * Fixes a bug that caused other CREATE EXTENSION commands to take longer + + * Fixes a bug that prevented REFRESH MATERIALIZED VIEW + + * Fixes a bug when view is used in modify statements + + * Fixes a memory leak in adaptive executor when query returns many columns + + * Fixes underflow init of default values in worker extended op node creation + + * Fixes potential segfault in standard_planner inlining functions + + * Fixes an issue that caused failures in RHEL 6 builds + + * Fixes queries with repartition joins and group by unique column + + * Improves CTE/Subquery performance by pruning intermediate rslt broadcasting + + * Removes citus.worker_list_file GUC + + * Revokes usage from the citus schema from public + + -- Onur Tirtir Thu, 28 Nov 2019 15:11:05 +0000 + +citus (9.0.1.citus-1) stable; urgency=low + + * Fixes a memory leak in the executor + + * Revokes usage from the citus schema from public + + -- Hanefi Onaldi Wed, 30 Oct 2019 8:53:22 +0000 + +citus (9.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 12 + + * Adds UDFs to help with PostgreSQL upgrades + + * Distributes types to worker nodes + + * Introduces create_distributed_function UDF + + * Introduces local query execution for Citus MX + + * Implements infrastructure for routing CALL to MX workers + + * Implements infrastructure for routing SELECT function() to MX workers + + * Adds support for foreign key constraints between reference tables + + * Adds a feature flag to turn off CREATE TYPE propagation + + * Adds option citus.single_shard_commit_protocol + + * Adds support for EXPLAIN SUMMARY + + * Adds support for GENERATE ALWAYS AS STORED + + * Adds support for serial and smallserial in MX mode + + * Adds support for anon composite types on the target list in router queries + + * Avoids race condition between create_reference_table & master_add_node + + * Fixes a bug in schemas of distributed sequence definitions + + * Fixes a bug that caused run_command_on_colocated_placements to fail + + * Fixes a bug that leads to various issues when a connection is lost + + * Fixes a schema leak on CREATE INDEX statement + + * Fixes assert failure in bare SELECT 
FROM reference table FOR UPDATE in MX + + * Makes master_update_node MX compatible + + * Prevents pg_dist_colocation from multiple records for reference tables + + * Prevents segfault in worker_partition_protocol edgecase + + * Propagates ALTER FUNCTION statements for distributed functions + + * Propagates CREATE OR REPLACE FUNCTION for distributed functions + + * Propagates REINDEX on tables & indexes + + * Provides a GUC to turn off the new dependency propagation functionality + + * Uses 2PC in adaptive executor when dealing with replication factors above 1 + + -- Hanefi Onaldi Tue, 15 Oct 2019 16:54:50 +0000 + +citus (8.3.2.citus-1) stable; urgency=low + + * Fixes performance issues by skipping unnecessary relation access recordings + + -- Hanefi Onaldi Fri, 9 Aug 2019 11:15:57 +0000 + +citus (8.3.1.citus-1) stable; urgency=low + + * Improves Adaptive Executor performance + + -- Hanefi Onaldi Mon, 29 Jul 2019 10:25:50 +0000 + +citus (8.3.0.citus-1) stable; urgency=low + + * Adds a new distributed executor: Adaptive Executor + + * citus.enable_statistics_collection defaults to off (opt-in) + + * Adds support for CTEs in router planner for modification queries + + * Adds support for propagating SET LOCAL at xact start + + * Adds option to force master_update_node during failover + + * Deprecates master_modify_multiple_shards + + * Improves round robin logic on router queries + + * Creates all distributed schemas as superuser on a separate connection + + * Makes COPY adapt to connection use behaviour of previous commands + + * Replaces SESSION_LIFESPAN with configurable no. of connections at xact end + + * Propagates ALTER FOREIGN TABLE commands to workers + + * Don't schedule tasks on inactive nodes + + * Makes DROP/VALIDATE CONSTRAINT tolerant of ambiguous shard extension + + * Fixes an issue with subquery map merge jobs as non-root + + * Fixes null pointers caused by partial initialization of ConnParamsHashEntry + + * Fixes errors caused by joins with shadowed aliases + + * Fixes a regression in outer joining subqueries introduced in 8.2.0 + + * Fixes a crash that can occur under high memory load + + * Fixes a bug that selects wrong worker when using round-robin assignment + + * Fixes savepoint rollback after multi-shard modify/copy failure + + * Fixes bad foreign constraint name search + + * Fixes a bug that prevents stack size to be adjusted + + -- Hanefi Onaldi Wed, 10 Jul 2019 15:19:02 +0000 + +citus (8.2.2.citus-1) stable; urgency=low + + * Fixes a bug in outer joins wrapped in subqueries + + -- Burak Velioglu Wed, 12 Jun 2019 8:45:08 +0000 + +citus (8.2.1.citus-1) stable; urgency=low + + * Fixes a bug that prevents stack size to be adjusted + + -- Burak Velioglu Wed, 3 Apr 2019 20:56:47 +0000 + +citus (8.1.2.citus-1) stable; urgency=low + + * Don't do redundant ALTER TABLE consistency checks at coordinator + + * Fixes a bug that prevents stack size to be adjusted + + * Fix an issue with some DECLARE ..
CURSOR WITH HOLD commands + + -- Burak Velioglu Wed, 3 Apr 2019 20:34:46 +0000 + +citus (8.2.0.citus-1) stable; urgency=low + + * Removes support and code for PostgreSQL 9.6 + + * Enable more outer joins with reference tables + + * Execute CREATE INDEX CONCURRENTLY in parallel + + * Treat functions as transaction blocks + + * Add support for column aliases on join clauses + + * Skip standard_planner() for trivial queries + + * Added support for function calls in joins + + * Round-robin task assignment policy relies on local transaction id + + * Relax subquery union pushdown restrictions for reference tables + + * Speed-up run_command_on_shards() + + * Address some memory issues in connection config + + * Restrict visibility of get_*_active_transactions functions to pg_monitor + + * Don't do redundant ALTER TABLE consistency checks at coordinator + + * Queries with only intermediate results do not rely on task assignment policy + + * Finish connection establishment in parallel for multiple connections + + * Fixes a bug related to pruning shards using a coerced value + + * Fix an issue with some DECLARE .. CURSOR WITH HOLD commands + + * Fixes a bug that could lead to infinite recursion during recursive planning + + * Fixes a bug that could prevent planning full outer joins with using clause + + * Fixes a bug that could lead to memory leak on citus_relation_size + + * Fixes a problem that could cause segmentation fault with recursive planning + + * Switch CI solution to CircleCI + + -- Burak Velioglu Fri, 29 Mar 2019 07:36:09 +0000 + +citus (8.0.3.citus-1) stable; urgency=low + + * Fixes maintenance daemon panic due to unreleased spinlock + + * Fixes an issue with having clause when used with complex joins + + -- Hanefi Onaldi Wed, 9 Jan 2019 9:50:07 +0000 + +citus (8.1.1.citus-1) stable; urgency=low + + * Fixes maintenance daemon panic due to unreleased spinlock + + * Fixes an issue with having clause when used with complex joins + + -- Hanefi Onaldi Mon, 7 Jan 2019 16:26:13 +0000 + +citus (8.1.0.citus-1) stable; urgency=low + + * Turns on ssl by default for new installations of citus + + * Restricts SSL Ciphers to TLS1.2 and above + + * Adds support for INSERT INTO SELECT..ON CONFLICT/RETURNING via coordinator + + * Adds support for round-robin task assignment for queries to reference tables + + * Adds support for SQL tasks using worker_execute_sql_task UDF with task-tracker + + * Adds support for VALIDATE CONSTRAINT queries + + * Adds support for disabling hash aggregate with HLL + + * Adds user ID suffix to intermediate files generated by task-tracker + + * Only allow transmit from pgsql_job_cache directory + + * Disallows GROUPING SET clauses in subqueries + + * Removes restriction on user-defined group ID in node addition functions + + * Relaxes multi-shard modify locks when enable_deadlock_prevention is disabled + + * Improves security in task-tracker protocol + + * Improves permission checks in internal DROP TABLE functions + + * Improves permission checks in cluster management functions + + * Cleans up UDFs and fixes permission checks + + * Fixes crashes caused by stack size increase under high memory load + + * Fixes a bug that could cause maintenance daemon panic + + -- Burak Velioglu Tue, 18 Dec 2018 15:12:45 +0000 + +citus (8.0.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause maintenance daemon panic + + * Fixes crashes caused by stack size increase under high memory load + + -- Burak Velioglu Thu, 13 Dec 2018 13:56:44 +0000 + +citus (7.5.4.citus-1) stable; 
urgency=low + + * Fixes a bug that could cause maintenance daemon panic + + -- Burak Velioglu Wed, 12 Dec 2018 11:45:24 +0000 + +citus (8.0.1.citus-1) stable; urgency=low + + * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker + + -- Burak Velioglu Wed, 28 Nov 2018 11:38:47 +0000 + +citus (7.5.3.citus-1) stable; urgency=low + + * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker + + -- Burak Velioglu Wed, 28 Nov 2018 10:52:20 +0000 + +citus (7.5.2.citus-1) stable; urgency=low + + * Fixes inconsistent metadata error when shard metadata caching get interrupted + + * Fixes a bug that could cause memory leak + + * Fixes a bug that prevents recovering wrong transactions in MX + + * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load + + * Fixes crashes caused by stack size increase under high memory load + + -- Burak Velioglu Wed, 14 Nov 2018 20:42:16 +0000 + +citus (8.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 11 + + * Adds support for applying DML operations on reference tables from MX nodes + + * Adds distributed locking to truncated MX tables + + * Adds support for running TRUNCATE command from MX worker nodes + + * Adds views to provide insight about the distributed transactions + + * Adds support for TABLESAMPLE in router queries + + * Adds support for INCLUDE option in index creation + + * Adds option to allow simple DML commands from hot standby + + * Adds support for partitioned tables with replication factor > 1 + + * Prevents a deadlock on concurrent DROP TABLE and SELECT on Citus MX + + * Fixes a bug that prevents recovering wrong transactions in MX + + * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load + + * Fixes a bug in MX mode, calling DROP SCHEMA with existing partitioned table + + * Fixes a bug that could cause modifying CTEs to select wrong execution mode + + * Fixes a bug preventing rollback in CREATE PROCEDURE + + * Fixes a bug on not being able to drop index on a partitioned table + + * Fixes a bug on TRUNCATE when there is a foreign key to a reference table + + * Fixes a performance issue in prepared INSERT..SELECT + + * Fixes a bug which causes errors on DROP DATABASE IF EXISTS + + * Fixes a bug to remove intermediate result directory in pull-push execution + + * Improves query pushdown planning performance + + * Evaluate functions anywhere in query + + -- Burak Velioglu Fri, 02 Nov 2018 08:06:42 +0000 + +citus (7.5.1.citus-1) stable; urgency=low + + * Improves query pushdown planning performance + + * Fixes a bug that could cause modifying CTEs to select wrong execution mode + + -- Burak Velioglu Wed, 29 Aug 2018 08:06:42 +0000 + +citus (7.4.2.citus-1) stable; urgency=low + + * Fixes a segfault in real-time executor during online shard move + + -- Mehmet Furkan Sahin Fri, 27 Jul 2018 13:42:27 +0000 + +citus (7.5.0.citus-1) stable; urgency=low + + * Adds foreign key support from hash distributed to reference tables + + * Adds SELECT ... FOR UPDATE support for router plannable queries + + * Adds support for non-partition columns in count distinct + + * Fixes a segfault in real-time executor during online shard move + + * Fixes ALTER TABLE ADD COLUMN constraint check + + * Fixes a bug where INSERT ... SELECT allows one to update dist. 
column + + * Allows DDL commands to be sequentialized via citus.multi_shard_modify_mode + + * Adds support for topn_union_agg and topn_add_agg across shards + + * Adds support for hll_union_agg and hll_add_agg across shards + + * Fixes a bug that might cause shards to have a wrong owner + + * Adds select_opens_transaction_block GUC + + * Adds utils to implement DDLs for policies in future + + * Makes intermediate results use separate connections + + * Adds a node_conninfo GUC to set outgoing connection settings + + -- Mehmet Furkan Sahin Wed, 25 Jul 2018 9:32:24 +0000 + +citus (6.2.6.citus-1) stable; urgency=low + + * Adds support for respecting enable_hashagg in the master planner + + -- Burak Velioglu Fri, 06 Jul 2018 13:30:08 +0000 + +citus (7.4.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause txns to incorrectly proceed after failure + + * Fixes a bug on INSERT ... SELECT queries in prepared statements + + -- Burak Velioglu Wed, 20 Jun 2018 12:25:30 +0000 + +citus (7.2.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause SELECTs to crash during a rebalance + + -- Burak Velioglu Thu, 17 May 2018 11:51:56 +0000 + +citus (7.4.0.citus-1) stable; urgency=low + + * Adds support for non-pushdownable subqueries and CTEs in UPDATE/DELETE + + * Adds support for pushdownable subqueries and joins in UPDATE/DELETE + + * Adds faster shard pruning for subqueries + + * Adds partitioning support to MX tables + + * Adds support for (VACUUM | ANALYZE) VERBOSE + + * Adds support for multiple ANDs in HAVING for pushdown planner + + * Adds support for quotation needy schema names + + * Improves operator check time in physical planner for custom data types + + * Removes broadcast join logic + + * Deprecates large_table_shard_count and master_expire_table_cache() + + * Modifies master_update_node to write-lock shards hosted by node over update + + * DROP TABLE now drops shards as the current user instead of the superuser + + * Adds specialised error codes for connection failures + + * Improves error messages on connection failure + + * Fixes issue which prevented multiple citus_table_size calls per query + + * Tests are updated to use create_distributed_table + + -- Burak Velioglu Tue, 15 May 2018 13:01:17 +0000 + +citus (7.3.0.citus-1) stable; urgency=low + + * Adds support for non-colocated joins between subqueries + + * Adds support for window functions that can be pushed down to worker + + * Adds support for modifying CTEs + + * Adds recursive plan for WHERE clause subqueries with recurring FROM clause + + * Adds support for bool_ and bit_ aggregates + + * Adds support for Postgres jsonb and json aggregation functions + + * Adds support for respecting enable_hashagg in the master plan + + * Performance improvements to reduce distributed planning time + + * Fixes a bug on planner when aggregate is used in ORDER BY + + * Fixes a bug on planner when DISTINCT (ON) clause is used with GROUP BY + + * Fixes a planner bug with distinct and aggregate clauses + + * Fixes a bug that opened new connections on each table size function call + + * Fixes a bug canceling backends not involved in distributed deadlocks + + * Fixes count distinct bug on column expressions when used with subqueries + + * Improves error handling on worker node failures + + * Improves error messages for INSERT queries that have subqueries + + -- Burak Velioglu Thu, 15 Mar 2018 14:16:10 +0000 + +citus (7.2.1.citus-1) stable; urgency=low + + * Fixes count distinct bug on column expressions when used with
subqueries + + * Adds support for respecting enable_hashagg in the master plan + + * Fixes a bug canceling backends not involved in distributed deadlocks + + -- Burak Velioglu Tue, 06 Feb 2018 14:46:07 +0000 + +citus (7.2.0.citus-1) stable; urgency=low + + * Adds support for CTEs + + * Adds support for subqueries that require merge step + + * Adds support for set operations (UNION, INTERSECT, ...) + + * Adds support for 2PC auto-recovery + + * Adds support for querying local tables in CTEs and subqueries + + * Adds support for more SQL coverage in subqueries for reference tables + + * Adds support for count(distinct) in queries with a subquery + + * Adds support for non-equijoins when there is already an equijoin + + * Adds support for real-time executor to run in transaction blocks + + * Adds infrastructure for storing intermediate distributed query results + + * Adds a new GUC named enable_repartition_joins for auto executor switch + + * Adds support for limiting the intermediate result size + + * Improves support for queries with unions containing filters + + * Improves support for queries with unions containing joins + + * Improves support for subqueries in the WHERE clause + + * Increases COPY throughput + + * Enables pushing down queries containing only recurring tuples and GROUP BY + + * Load-balance queries that read from 0 shards + + * Improves support for using functions in subqueries + + * Fixes a bug causing real-time executor to crash during cancellation + + * Fixes a bug causing real-time executor to get stuck on cancellation + + * Fixes a bug that could block modification queries unnecessarily + + * Fixes a bug that could cause assigning wrong IDs to transactions + + * Fixes a bug that could cause an assert failure with ANALYZE statements + + * Fixes a bug that would push down wrong set operations in subqueries + + * Fixes a bug that could cause a deadlock in create_distributed_table + + * Fixes a bug that could confuse users about ANALYZE usage + + * Fixes a bug causing false positive distributed deadlock detections + + * Relaxes the locking for DDL commands on partitioned tables + + * Relaxes the locking on COPY with replication + + * Logs more remote commands when citus.log_remote_commands is set + + -- Burak Velioglu Tue, 16 Jan 2018 14:34:20 +0000 + +citus (6.2.5.citus-1) stable; urgency=low + + * Fixes a bug that could crash the coordinator while reporting a remote error + + -- Burak Velioglu Thu, 11 Jan 2018 11:40:28 +0000 + +citus (7.1.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause assigning wrong IDs to transactions + + * Increases COPY throughput + + -- Burak Velioglu Fri, 05 Jan 2018 09:00:07 +0000 + +citus (7.1.1.citus-1) stable; urgency=low + + * Fixes a bug preventing pushing down subqueries with reference tables + + * Fixes a bug that could create false positive distributed deadlocks + + * Fixes a bug that could prevent running concurrent COPY and multi-shard DDL + + * Fixes a bug that could mislead users about ANALYZE queries + + -- Burak Velioglu Tue, 05 Dec 2017 09:00:07 +0000 + +citus (7.1.0.citus-1) stable; urgency=low + + * Adds support for native queries with multi shard UPDATE/DELETE queries + + * Expands reference table support in subquery pushdown + + * Adds window function support for subqueries and INSERT ...
SELECT queries + + * Adds support for COUNT(DISTINCT) [ON] queries on non-partition columns + + * Adds support for DISTINCT [ON] queries on non-partition columns + + * Introduces basic usage statistic collector + + * Adds support for setting replica identity while creating distributed tables + + * Adds support for ALTER TABLE ... REPLICA IDENTITY queries + + * Adds pushdown support for LIMIT and HAVING grouped by partition key + + * Adds support for INSERT ... SELECT queries via worker nodes on MX clusters + + * Adds support for adding primary key using already defined index + + * Adds replication parameter to shard copy functions + + * Changes shard_name UDF to omit public schema name + + * Adds master_move_node UDF to make changes on nodename/nodeport easier + + * Fixes a bug that could cause casting error with INSERT ... SELECT queries + + * Fixes a bug that could prevent upgrading servers from Citus 6.1 + + * Fixes a bug that could prevent attaching partitions to a table in schema + + * Fixes a bug preventing adding nodes to clusters with reference tables + + * Fixes a bug that could cause a crash with INSERT ... SELECT queries + + * Fixes a bug that could prevent creating a partitioned table on Cloud + + * Implements various performance improvements + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Velioglu Wed, 15 Nov 2017 09:00:07 +0000 + +citus (7.0.3.citus-1) stable; urgency=low + + * Fixes several bugs that could cause crashes + + * Fixes a bug that could cause deadlock while creating reference tables + + * Fixes a bug that could cause false-positives in deadlock detection + + * Fixes a bug that could cause 2PC recovery not to work from MX workers + + * Fixes a bug that could cause cache incoherency + + * Fixes a bug that could cause maintenance daemon to skip cache invalidations + + * Improves performance of transaction recovery by using correct index + + -- Burak Yucesoy Mon, 16 Oct 2017 11:52:07 +0000 + +citus (7.0.2.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 22:29:01 +0000 + +citus (6.2.4.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 21:31:35 +0000 + +citus (6.1.3.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 20:31:35 +0000 + +citus (7.0.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause memory leaks in INSERT ... SELECT queries + + * Fixes a bug that could cause incorrect execution of prepared statements + + * Fixes a bug that could cause excessive memory usage during COPY + + * Incorporates latest changes from core PostgreSQL code + + -- Burak Yucesoy Tue, 12 Sep 2017 17:53:50 +0000 + +citus (7.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 10 + + * Drops support for PostgreSQL 9.5 + + * Adds support for multi-row INSERT + + * Adds support for router UPDATE and DELETE queries with subqueries + + * Adds infrastructure for distributed deadlock detection + + * Deprecates enable_deadlock_prevention flag + + * Adds support for partitioned tables + + * Adds support for creating UNLOGGED tables + + * Adds support for SAVEPOINT + + * Adds UDF citus_create_restore_point for taking distributed snapshots + + * Adds support for evaluating non-pushable INSERT ...
SELECT queries + + * Adds support for subquery pushdown on reference tables + + * Adds shard pruning support for IN and ANY + + * Adds support for UPDATE and DELETE commands that prune down to 0 shard + + * Enhances transaction support by relaxing some transaction restrictions + + * Fixes a bug causing crash if distributed table has no shards + + * Fixes a bug causing crash when removing inactive node + + * Fixes a bug causing failure during COPY on tables with dropped columns + + * Fixes a bug causing failure during DROP EXTENSION + + * Fixes a bug preventing executing VACUUM and INSERT concurrently + + * Fixes a bug in prepared INSERT statements containing an implicit cast + + * Fixes several issues related to statement cancellations and connections + + * Fixes several 2PC related issues + + * Removes an unnecessary dependency causing warning messages in pg_dump + + * Adds internal infrastructure for follower clusters + + * Adds internal infrastructure for progress tracking + + * Implements various performance improvements + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Mon, 28 Aug 2017 12:27:53 +0000 + +citus (6.2.3.citus-1) stable; urgency=low + + * Fixes a crash during execution of local CREATE INDEX CONCURRENTLY + + * Fixes a bug preventing usage of quoted column names in COPY + + * Fixes a bug in prepared INSERTs with implicit cast in partition column + + * Relaxes locks in VACUUM to ensure concurrent execution with INSERT + + -- Burak Yucesoy Thu, 13 Jul 2017 11:27:17 +0000 + +citus (6.2.2.citus-1) stable; urgency=low + + * Fixes a common cause of deadlocks when repairing tables with foreign keys + + -- Burak Velioglu Wed, 07 Jun 2017 09:42:17 +0000 + +citus (6.1.2.citus-1) stable; urgency=low + + * Fixes a common cause of deadlocks when repairing tables with foreign keys + + -- Jason Petersen Wed, 31 May 2017 16:14:11 +0000 + +citus (6.2.1.citus-1) stable; urgency=low + + * Relaxes version-check logic to avoid breaking non-distributed commands + + -- Jason Petersen Wed, 24 May 2017 22:36:07 +0000 + +citus (6.2.0.citus-1) stable; urgency=low + + * Increases SQL subquery coverage by pushing down more kinds of queries + + * Adds CustomScan API support to allow read-only transactions + + * Adds support for CREATE/DROP INDEX CONCURRENTLY + + * Adds support for ALTER TABLE ... ADD CONSTRAINT + + * Adds support for ALTER TABLE ... 
RENAME COLUMN + + * Adds support for DISABLE/ENABLE TRIGGER ALL + + * Adds support for expressions in the partition column in INSERTs + + * Adds support for query parameters in combination with function evaluation + + * Adds support for creating distributed tables from non-empty local tables + + * Adds UDFs to get size of distributed tables + + * Adds UDFs to add a new node without replicating reference tables + + * Adds checks to prevent running Citus binaries with wrong metadata tables + + * Improves shard pruning performance for range queries + + * Improves planner performance for joins involving co-located tables + + * Improves shard copy performance by creating indexes after copy + + * Improves task-tracker performance by batching several status checks + + * Enables router planner for queries on range partitioned table + + * Changes TRUNCATE to drop local data only if enable_ddl_propagation is off + + * Starts to execute DDL on coordinator before workers + + * Fixes a bug causing incorrectly reading invalidated cache + + * Fixes a bug related to creation of schemas in workers with incorrect owner + + * Fixes a bug related to concurrent run of shard drop functions + + * Fixes a bug related to EXPLAIN ANALYZE with DML queries + + * Fixes a bug related to SQL functions in FROM clause + + * Adds a GUC variable to report cross shard queries + + * Fixes a bug related to partition columns without native hash function + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Tue, 16 May 2017 16:05:22 +0000 + +citus (6.1.1.citus-1) stable; urgency=low + + * Fixes a crash caused by router executor use after connection timeouts + + * Fixes a crash caused by relation cache invalidation during COPY + + * Fixes bug related to DDL use within PL/pgSQL functions + + * Fixes a COPY bug related to types lacking binary output functions + + * Fixes a bug related to modifications with parameterized partition values + + * Fixes improper value interpolation in worker sequence generation + + * Guards shard pruning logic against zero-shard tables + + * Fixes possible NULL pointer dereference and buffer underflow, via PVS-Studio + + * Fixes a INSERT...SELECT bug that could push down non-partition column JOINs + + -- Metin Doslu Fri, 5 May 2017 17:42:00 +0000 + +citus (6.1.0.citus-1) stable; urgency=low + + * Implements reference tables, transactionally replicated to all nodes + + * Adds upgrade_to_reference_table UDF to upgrade pre-6.1 reference tables + + * Expands prepared statement support to nearly all statements + + * Adds support for creating VIEWs which reference distributed tables + + * Adds targeted VACUUM/ANALYZE support + + * Adds support for the FILTER clause in aggregate expressions + + * Adds support for function evaluation within INSERT INTO ... 
SELECT + + * Adds support for creating foreign key constraints with ALTER TABLE + + * Adds logic to choose router planner for all queries it supports + + * Enhances create_distributed_table with parameter for explicit colocation + + * Adds generally useful utility UDFs previously available as "Citus Tools" + + * Adds user-facing UDFs for locking shard resources and metadata + + * Refactors connection and transaction management for more consistency + + * Enhances COPY with fully transactional semantics + + * Improves support for cancellation for a number of queries and commands + + * Adds column_to_column_name UDF to help users understand partkey values + + * Adds master_disable_node UDF for temporarily disabling nodes + + * Adds proper MX ("masterless") metadata propagation logic + + * Adds start_metadata_sync_to_node UDF to propagate metadata changes to nodes + + * Enhances SERIAL compatibility with MX tables + + * Adds a node_connection_timeout parameter to set node connection timeouts + + * Adds enable_deadlock_prevention setting to permit multi-node transactions + + * Adds a replication_model setting to specify replication of new tables + + * Changes the shard_replication_factor setting's default value to one + + * Adds code to automatically set max_prepared_transactions if not configured + + * Accelerates lookup of colocated shard placements + + * Fixes a bug affecting INSERT INTO ... SELECT queries using constant values + + * Fixes a bug by ensuring COPY does not mark placements inactive + + * Fixes a bug affecting reads from pg_dist_shard_placement table + + * Fixes a crash triggered by creating a foreign key without a column + + * Fixes a crash related to accessing catalog tables after aborted transaction + + * Fixes a bug affecting JOIN queries requiring repartitions + + * Fixes a bug affecting node insertions to pg_dist_node table + + * Fixes a crash triggered by queries with modifying common table expressions + + * Fixes a bug affecting workloads with concurrent shard appends and deletions + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Thu, 9 Feb 2017 16:17:41 +0000 + +citus (6.0.1.citus-3) stable; urgency=low + + * First build using new versioning practices + + -- Jason Petersen Wed, 8 Feb 2017 23:19:46 +0000 + +citus (6.0.1.citus-2) stable; urgency=low + + * Transitional package to guide users to new package name + + -- Jason Petersen Mon, 6 Feb 2017 16:33:44 +0000 + +citus (6.0.1.citus-1) stable; urgency=low + + * Fixes a bug causing failures during pg_upgrade + + * Fixes a bug preventing DML queries during colocated table creation + + * Fixes a bug that caused NULL parameters to be incorrectly passed as text + + -- Burak Yucesoy Wed, 30 Nov 2016 15:27:38 +0000 + +citus (6.0.0.citus-1) stable; urgency=low + + * Adds compatibility with PostgreSQL 9.6, now the recommended version + + * Removes the pg_worker_list.conf file in favor of a pg_dist_node table + + * Adds master_add_node and master_remove_node UDFs to manage membership + + * Removes the \stage command and corresponding csql binary in favor of COPY + + * Removes copy_to_distributed_table in favor of first-class COPY support + + * Adds support for multiple DDL statements within a transaction + + * Adds support for certain foreign key constraints + + * Adds support for parallel INSERT INTO ...
SELECT against colocated tables + + * Adds support for the TRUNCATE command + + * Adds support for HAVING clauses in SELECT queries + + * Adds support for EXCLUDE constraints which include the partition column + + * Adds support for system columns in queries (tableoid, ctid, etc.) + + * Adds support for relation name extension within INDEX definitions + + * Adds support for no-op UPDATEs of the partition column + + * Adds several general-purpose utility UDFs to aid in Citus maintenance + + * Adds master_expire_table_cache UDF to forcibly expire cached shards + + * Parallelizes the processing of DDL commands which affect distributed tables + + * Adds support for repartition jobs using composite or custom types + + * Enhances object name extension to handle long names and large shard counts + + * Parallelizes the master_modify_multiple_shards UDF + + * Changes distributed table creation to error if target table is not empty + + * Changes the pg_dist_shard.logicalrelid column from an oid to regclass + + * Adds a placementid column to pg_dist_shard_placement, replacing Oid use + + * Removes the pg_dist_shard.shardalias distribution metadata column + + * Adds pg_dist_partition.repmodel to track tables using streaming replication + + * Adds internal infrastructure to take snapshots of distribution metadata + + * Addresses the need to invalidate prepared statements on metadata changes + + * Adds a mark_tables_colocated UDF for denoting pre-6.0 manual colocation + + * Fixes a bug affecting prepared statement execution within PL/pgSQL + + * Fixes a bug affecting COPY commands using composite types + + * Fixes a bug that could cause crashes during EXPLAIN EXECUTE + + * Separates worker and master job temporary folders + + * Eliminates race condition between distributed modification and repair + + * Relaxes the requirement that shard repairs also repair colocated shards + + * Implements internal functions to track which tables' shards are colocated + + * Adds pg_dist_partition.colocationid to track colocation group membership + + * Extends shard copy and move operations to respect colocation settings + + * Adds pg_dist_local_group to prepare for future MX-related changes + + * Adds create_distributed_table to easily create shards and infer colocation + + -- Jason Petersen Tue, 8 Nov 2016 19:45:45 +0000 + +citus (5.2.2.citus-1) stable; urgency=low + + * Adds support for IF NOT EXISTS clause of CREATE INDEX command + + * Adds support for RETURN QUERY and FOR ... 
IN PL/pgSQL features + + * Extends the router planner to handle more queries + + * Changes COUNT of zero-row sets to return 0 rather than an empty result + + * Reduces the minimum permitted task_tracker_delay to a single millisecond + + * Fixes a bug that caused crashes during joins with a WHERE false clause + + * Fixes a bug triggered by unique violation errors raised in long txns + + * Fixes a bug resulting in multiple registration of transaction callbacks + + * Fixes a bug which could result in stale reads of distribution metadata + + * Fixes a bug preventing distributed modifications in some PL/pgSQL functions + + * Fixes some code paths that could hypothetically read uninitialized memory + + * Lowers log level of "waiting for activity" messages + + -- Jason Petersen Tue, 8 Nov 2016 18:43:37 +0000 + +citus (5.2.1.citus-1) stable; urgency=low + + * Fixes subquery pushdown to properly extract outer join qualifiers + + * Addresses possible memory leak during multi-shard transactions + + -- Jason Petersen Tue, 6 Sep 2016 20:47:15 +0000 + +citus (5.2.0.citus-1) stable; urgency=low + + * Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required + + * Adds schema support for tables, named objects (types, operators, etc.) + + * Evaluates non-immutable functions on master in all modification commands + + * Adds support for SERIAL types in non-partition columns + + * Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands + + * Adds support for multi-statement transactions using a fixed set of nodes + + * Full SQL support for SELECT queries which can be executed on single worker + + * Adds option to perform DDL changes using prepared transactions (2PC) + + * Adds an enable_ddl_propagation parameter to control DDL propagation + + * Accelerates shard pruning during merges + + * Adds master_modify_multiple_shards UDF to modify many shards at once + + * Adds COPY support for arrays of user-defined types + + * Now supports parameterized prepared statements for certain use cases + + * Extends LIMIT/OFFSET support to all executor types + + * Constraint violations now fail fast rather than hitting all placements + + * Makes master_create_empty_shard aware of shard placement policy + + * Reduces unnecessary sleep during queries processed by real-time executor + + * Improves task tracker executor's task cleanup logic + + * Relaxes restrictions on cancellation of DDL commands + + * Removes ONLY keyword from worker SELECT queries + + * Error message improvements and standardization + + * Moves master_update_shard_statistics function to pg_catalog schema + + * Fixes a bug where hash-partitioned anti-joins could return bad results + + * Now sets storage type correctly for foreign table-backed shards + + * Fixes master_update_shard_statistics issue with hash-partitioned tables + + * Fixes an issue related to extending table names that require escaping + + * Reduces risk of row counter overflows during modifications + + * Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries + + * Fixes crashes related to partition columns with high attribute numbers + + * Fixes certain subquery and join crashes + + * Detects flex for build even if PostgreSQL was built without it + + * Fixes assert-enabled crash when all_modifications_commutative is true + + -- Jason Petersen Wed, 17 Aug 2016 10:23:21 +0000 + +citus (5.2.0~rc.1-1) testing; urgency=low + + * Release candidate for 5.2. 
+ + -- Jason Petersen Mon, 01 Aug 2016 17:01:05 +0000 + +citus (5.1.1-1) stable; urgency=low + + * Adds complex count distinct expression support in repartitioned subqueries + + * Improves task tracker job cleanup logic, addressing a memory leak + + * Fixes bug that generated incorrect results for LEFT JOIN queries + + * Improves compatibility with Debian's reproducible builds project + + * Fixes build issues on FreeBSD platforms + + -- Jason Petersen Fri, 17 Jun 2016 16:20:15 +0000 + +citus (5.1.0-1) stable; urgency=low + + * Adds distributed COPY to rapidly populate distributed tables + + * Adds support for using EXPLAIN on distributed queries + + * Recognizes and fast-paths single-shard SELECT statements automatically + + * Increases INSERT throughput via shard pruning optimizations + + * Improves planner performance for joins involving tables with many shards + + * Adds ability to pass columns as arguments to function calls in UPDATEs + + * Introduces transaction manager for use by multi-shard commands + + * Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables + + * Adds support for some UNIQUE indexes on hash- or range-partitioned tables + + * Deprecates \stage in favor of using COPY for append-partition tables + + * Deprecates copy_to_distributed_table in favor of first-class COPY support + + * Fixes build problems when using non-packaged PostgreSQL installs + + * Fixes bug that sometimes skipped pruning when partitioned by VARCHAR column + + * Fixes bug impeding use of user functions in repartitioned subqueries + + * Fixes bug involving queries with equality comparisons of boolean types + + * Fixes crash that prevented use alongside pg_stat_statements + + * Fixes crash arising from SELECT queries that lack a target list + + * Improves warning and error messages + + -- Jason Petersen Tue, 17 May 2016 16:55:02 +0000 + +citus (5.1.0~rc.2-1) testing; urgency=low + + * Fix EXPLAIN output when FORMAT JSON in use + + -- Jason Petersen Mon, 16 May 2016 11:08:09 +0000 + +citus (5.1.0~rc.1-1) testing; urgency=low + + * Release candidate for 5.1. 
+
+ -- Jason Petersen Wed, 04 May 2016 19:26:23 +0000
+
+citus (5.0.1-1) stable; urgency=low
+
+  * Fixes issues on 32-bit systems
+
+ -- Jason Petersen Fri, 15 Apr 2016 19:17:35 +0000
+
+citus (5.0.0-1) stable; urgency=low
+
+  * Initial release
+
+ -- Jason Petersen Thu, 24 Mar 2016 10:12:52 -0400
diff --git a/packaging_automation/tests/files/debian/changelog b/packaging_automation/tests/files/debian/changelog
index ffdce6ed..43cc3b2f 100644
--- a/packaging_automation/tests/files/debian/changelog
+++ b/packaging_automation/tests/files/debian/changelog
@@ -1,2155 +1,2155 @@
-citus (10.1.4.citus-1) stable; urgency=low
-
-  * Official 10.1.4 release of Citus
-
- -- Gurkan Indibay Tue, 01 Feb 2022 11:49:11 +0000
-
-citus (10.2.3.citus-1) stable; urgency=low
-
-  * Official 10.2.3 release of Citus
-
- -- Gurkan Indibay Mon, 29 Nov 2021 11:00:41 +0000
-
-citus (10.0.6.citus-1) stable; urgency=low
-
-  * Official 10.0.6 release of Citus
-
- -- Gurkan Indibay Fri, 12 Nov 2021 11:37:25 +0000
-
-citus (9.5.10.citus-1) stable; urgency=low
-
-  * Official 9.5.10 release of Citus
-
- -- Gurkan Indibay Mon, 08 Nov 2021 14:18:56 +0000
-
-citus (9.2.8.citus-1) stable; urgency=low
-
-  * Official 9.2.8 release of Citus
-
- -- Gurkan Indibay Thu, 04 Nov 2021 12:45:09 +0000
-
-citus (9.2.7.citus-1) stable; urgency=low
-
-  * Official 9.2.7 release of Citus
-
- -- Gurkan Indibay Wed, 03 Nov 2021 09:34:30 +0000
-
-citus (10.2.2.citus-1) stable; urgency=low
-
-  * Official 10.2.2 release of Citus
-
- -- Gurkan Indibay Thu, 14 Oct 2021 13:00:27 +0000
-
-citus (10.2.1.citus-1) stable; urgency=low
-
-  * Adds missing version-mismatch checks for columnar tables
-
-  * Adds missing version-mismatch checks for internal functions
-
-  * Fixes a bug that could cause partition shards being not co-located with
-    parent shards
-
-  * Fixes a bug that prevents pushing down boolean expressions when using
-    columnar custom scan
-
-  * Fixes a clog lookup failure that could occur when writing to a columnar
-    table
-
-  * Fixes an issue that could cause unexpected errors when there is an
-    in-progress write to a columnar table
-
-  * Revokes read access to `columnar.chunk` from unprivileged user
-
- -- Gurkan Indibay Fri, 24 Sep 2021 12:03:35 +0000
-
-citus (10.1.3.citus-1) stable; urgency=low
-
-  * Official 10.1.3 release of Citus
-
- -- Gurkan Indibay Fri, 17 Sep 2021 17:23:05 +0000
-
-citus (10.2.0.citus-1) stable; urgency=low
-
-  * Adds PostgreSQL 14 support
-
-  * Adds hash & btree index support for columnar tables
-
-  * Adds helper UDFs for easy time partition management:
-    `get_missing_time_partition_ranges`, `create_time_partitions`, and
-    `drop_old_time_partitions`
-
-  * Adds propagation of ALTER SEQUENCE
-
-  * Adds support for ALTER INDEX ATTACH PARTITION
-
-  * Adds support for CREATE INDEX ON ONLY
-
-  * Allows more graceful failovers when replication factor > 1
-
-  * Enables chunk group filtering to work with Params for columnar tables
-
-  * Enables qual push down for joins including columnar tables
-
-  * Enables transferring of data using binary encoding by default on PG14
-
-  * Improves `master_update_table_statistics` and provides distributed deadlock
-    detection
-
-  * Includes `data_type` and `cache` in sequence definition on worker
-
-  * Makes start/stop_metadata_sync_to_node() transactional
-
-  * Makes sure that table exists before updating table statistics
-
-  * Prevents errors with concurrent `citus_update_table_statistics` and DROP table
-
-  * Reduces memory usage of columnar table scans by freeing the memory used for
-
last stripe read - - * Shows projected columns for columnar tables in EXPLAIN output - - * Speeds up dropping partitioned tables - - * Synchronizes hasmetadata flag on mx workers - - * Uses current user while syncing metadata - - * Adds a parameter to cleanup metadata when metadata syncing is stopped - - * Fixes a bug about int and smallint sequences on MX - - * Fixes a bug that cause partitions to have wrong distribution key after - DROP COLUMN - - * Fixes a bug that caused `worker_append_table_to_shard` to write as superuser - - * Fixes a bug that caused `worker_create_or_alter_role` to crash with NULL input - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that may cause crash while aborting transaction - - * Fixes a bug that prevents attaching partitions when colocated foreign key - exists - - * Fixes a bug with `nextval('seq_name'::text)` - - * Fixes a crash in shard rebalancer when no distributed tables exist - - * Fixes a segfault caused by use after free in when using a cached connection - - * Fixes a UNION pushdown issue - - * Fixes a use after free issue that could happen when altering a distributed - table - - * Fixes showing target shard size in the rebalance progress monitor - - -- Gurkan Indibay Thu, 16 Sep 2021 08:45:17 +0000 - -citus (10.1.2.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - -- Gurkan Indibay Tue, 17 Aug 2021 16:25:14 +0000 - -citus (10.0.5.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Tue, 17 Aug 2021 14:42:54 +0000 - -citus (9.5.7.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - * Improves master_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Tue, 17 Aug 2021 13:18:11 +0000 - -citus (9.4.6.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Improves master_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Wed, 11 Aug 2021 08:57:04 +0000 - -citus (10.1.1.citus-1) stable; urgency=low - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - * Fixes showing target shard size in the rebalance progress monitor - - -- Gurkan Indibay Fri, 06 Aug 2021 08:38:37 +0000 - -citus (10.1.0.citus-1) stable; urgency=low - - * Drops support for PostgreSQL 11 - - * Adds `shard_count` parameter to `create_distributed_table` function - - * Adds support for `ALTER DATABASE OWNER` - - * Adds support for temporary columnar tables - - * Adds support for using sequences as column default values when syncing - metadata - - * `alter_columnar_table_set` enforces columnar table option constraints - - * Continues to remove shards after failure in `DropMarkedShards` - - * Deprecates the `citus.replication_model` GUC - - * Enables `citus.defer_drop_after_shard_move` by default - - * Ensures free disk space before moving a shard - - * Fetches shard size on the fly for the rebalance monitor - - * Ignores old placements when disabling or 
removing a node - - * Implements `improvement_threshold` at shard rebalancer moves - - * Improves orphaned shard cleanup logic - - * Improves performance of `citus_shards` - - * Introduces `citus.local_hostname` GUC for connections to the current node - - * Makes sure connection is closed after each shard move - - * Makes sure that target node in shard moves is eligible for shard move - - * Optimizes partitioned disk size calculation for shard rebalancer - - * Prevents connection errors by properly terminating connections - - * Prevents inheriting a distributed table - - * Prevents users from dropping & truncating known shards - - * Pushes down `VALUES` clause as long as not in outer part of a `JOIN` - - * Reduces memory usage for multi-row inserts - - * Reduces memory usage while rebalancing shards - - * Removes length limits around partition names - - * Removes dependencies on the existence of public schema - - * Executor avoids opening extra connections - - * Excludes orphaned shards while finding shard placements - - * Preserves access method of materialized views when undistributing - or altering distributed tables - - * Fixes a bug that allowed moving of shards belonging to a reference table - - * Fixes a bug that can cause a crash when DEBUG4 logging is enabled - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that causes worker_create_or_alter_role to crash with NULL input - - * Fixes a bug where foreign key to reference table was disallowed - - * Fixes a bug with local cached plans on tables with dropped columns - - * Fixes data race in `get_rebalance_progress` - - * Fixes `FROM ONLY` queries on partitioned tables - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - * Fixes error message for local table joins - - * Fixes issues caused by omitting public schema in queries - - * Fixes nested `SELECT` query with `UNION` bug - - * Fixes null relationName bug at parallel execution - - * Fixes possible segfaults when using Citus in the middle of an upgrade - - * Fixes problems with concurrent calls of `DropMarkedShards` - - * Fixes shared dependencies that are not resident in a database - - * Fixes stale hostnames bug in prepared statements after `master_update_node` - - * Fixes the relation size bug during rebalancing - - * Fixes two race conditions in the get_rebalance_progress - - * Fixes using 2PC when it might be necessary - - -- Gurkan Indibay Fri, 16 Jul 2021 15:37:21 +0000 - -citus (10.0.4.citus-1) stable; urgency=low - - * Introduces `citus.local_hostname` GUC for connections to the current node - - * Removes dependencies on the existence of public schema - - * Removes limits around long partition names - - * Fixes a bug that can cause a crash when DEBUG4 logging is enabled - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - * Fixes FROM ONLY queries on partitioned tables - - * Fixes issues caused by public schema being omitted in queries - - * Fixes problems with concurrent calls of DropMarkedShards - - * Fixes relname null bug when using parallel execution - - * Fixes two race conditions in the get_rebalance_progress - - -- Gurkan Indibay Fri, 16 Jul 2021 10:58:55 +0000 - -citus (9.5.6.citus-1) stable; urgency=low - - * Fixes minor bug in `citus_prepare_pg_upgrade` that caused it to lose its - idempotency - - -- Gurkan Fri, 09 Jul 2021 13:30:42 +0000 - -citus (9.4.5.citus-1) stable; urgency=low - - * 
Adds a configure flag to enforce security - - * Avoids re-using connections for intermediate results - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that might cause self-deadlocks when COPY used in TX block - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - -- Gurkan Thu, 08 Jul 2021 10:15:23 +0000 - -citus (10.0.3.citus-1) stable; urgency=low - - * Prevents infinite recursion for queries that involve UNION ALL - below `JOIN` - - * Fixes a crash in queries with a modifying CTE and a SELECT - without `FROM` - - * Fixes upgrade and downgrade paths for citus_update_table_statistics - - * Fixes a bug that causes SELECT queries to use 2PC unnecessarily - - * Fixes a bug that might cause self-deadlocks with - `CREATE INDEX` / `REINDEX CONCURRENTLY` commands - - * Adds citus.max_cached_connection_lifetime GUC to set maximum connection - lifetime - - * Adds citus.remote_copy_flush_threshold GUC that controls - per-shard memory usages by `COPY` - - * Adds citus_get_active_worker_nodes UDF to deprecate - `master_get_active_worker_nodes` - - * Skips 2PC for readonly connections in a transaction - - * Makes sure that local execution starts coordinated transaction - - * Removes open temporary file warning when cancelling a query with - an open tuple store - - * Relaxes the locks when adding an existing node - - -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 - -citus (10.0.2.citus-1) stable; urgency=low - - * Adds a configure flag to enforce security - - * Fixes a bug due to cross join without target list - - * Fixes a bug with UNION ALL on PG 13 - - * Fixes a compatibility issue with pg_audit in utility calls - - * Fixes insert query with CTEs/sublinks/subqueries etc - - * Grants SELECT permission on citus_tables view to public - - * Grants SELECT permission on columnar metadata tables to public - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - * Preserves colocation with procedures in alter_distributed_table - - * Prevents using alter_columnar_table_set and alter_columnar_table_reset - on a columnar table not owned by the user - - * Removes limits around long table names - - -- Gurkan Indibay Thu, 4 Mar 2021 2:46:54 +0000 - -citus (9.5.2.citus-1) stable; urgency=low - - * Fixes distributed deadlock detection being blocked by metadata sync - - * Prevents segfaults when SAVEPOINT handling cannot recover from connection - failures - - * Fixes possible issues that might occur with single shard distributed tables - - -- gurkanindibay Wed, 27 Jan 2021 11:25:38 +0000 - -citus (9.4.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause router queries with local tables to be pushed - down - - * Fixes a segfault in connection management due to invalid connection hash - entries - - * Fixes possible issues that might occur with single shard distributed tables - - -- gurkanindibay Tue, 5 Jan 2021 14:58:56 +0000 - -citus (9.5.1.citus-1) stable; urgency=low - - * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE - - * Fixes a bug that could cause excessive memory consumption when a partition is - created - - * Fixes a bug that triggers subplan executions unnecessarily with cursors - - * Fixes a segfault in connection management due to invalid connection hash - entries - - -- Onur Tirtir Wed, 2 Dec 2020 14:28:44 +0000 - -citus (9.4.3.citus-1) stable; urgency=low - - * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE - - * Fixes a bug that triggers subplan executions 
unnecessarily with cursors - - -- Onur Tirtir Tue, 24 Nov 2020 11:17:57 +0000 - -citus (9.5.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 13 - - * Removes the task-tracker executor - - * Introduces citus local tables - - * Introduces undistribute_table UDF to convert tables back to postgres tables - - * Adds support for EXPLAIN (ANALYZE) EXECUTE and EXPLAIN EXECUTE - - * Adds support for EXPLAIN (ANALYZE, WAL) for PG13 - - * Sorts the output of EXPLAIN (ANALYZE) by execution duration. - - * Adds support for CREATE TABLE ... USING table_access_method - - * Adds support for WITH TIES option in SELECT and INSERT SELECT queries - - * Avoids taking multi-shard locks on workers - - * Enforces citus.max_shared_pool_size config in COPY queries - - * Enables custom aggregates with multiple parameters to be executed on workers - - * Enforces citus.max_intermediate_result_size in local execution - - * Improves cost estimation of INSERT SELECT plans - - * Introduces delegation of procedures that read from reference tables - - * Prevents pull-push execution for simple pushdownable subqueries - - * Improves error message when creating a foreign key to a local table - - * Makes citus_prepare_pg_upgrade idempotent by dropping transition tables - - * Disallows ON TRUE outer joins with reference & distributed tables when - reference table is outer relation to avoid incorrect results - - * Disallows field indirection in INSERT/UPDATE queries to avoid incorrect - results - - * Disallows volatile functions in UPDATE subqueries to avoid incorrect results - - * Fixes CREATE INDEX CONCURRENTLY crash with local execution - - * Fixes citus_finish_pg_upgrade to drop all backup tables - - * Fixes a bug that cause failures when RECURSIVE VIEW joined reference table - - * Fixes DROP SEQUENCE failures when metadata syncing is enabled - - * Fixes a bug that caused CREATE TABLE with CHECK constraint to fail - - * Fixes a bug that could cause VACUUM to deadlock - - * Fixes master_update_node failure when no background worker slots are available - - * Fixes a bug that caused replica identity to not be propagated on shard repair - - * Fixes a bug that could cause crashes after connection timeouts - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug that could cause deadlocks on CREATE INDEX - - * Fixes a bug with genetic query optimization in outer joins - - * Fixes a crash when aggregating empty tables - - * Fixes a crash with inserting domain constrained composite types - - * Fixes a crash with multi-row & router INSERT's in local execution - - * Fixes a possibility of doing temporary file cleanup more than once - - * Fixes incorrect setting of join related fields - - * Fixes memory issues around deparsing index commands - - * Fixes reference table access tracking for sequential execution - - * Fixes removal of a single node with only reference tables - - * Fixes sending commands to coordinator when it is added as a worker - - * Fixes write queries with const expressions and COLLATE in various places - - * Fixes wrong cancellation message about distributed deadlock - - -- Onur Tirtir Wed, 11 Nov 2020 15:00:27 +0000 - -citus (9.4.2.citus-1) stable; urgency=low - - * Fixes a bug that could lead to multiple maintenance daemons - - * Fixes an issue preventing views in reference table modifications - - -- Onur Tirtir Thu, 22 Oct 2020 8:53:44 +0000 - -citus (9.4.1.citus-1) stable; urgency=low - - * Fixes EXPLAIN ANALYZE output truncation - - * Fixes a deadlock during 
transaction recovery - - -- Onur Tirtir Wed, 30 Sep 2020 9:33:46 +0000 - -citus (9.4.0.citus-1) stable; urgency=low - - * Improves COPY by honoring max_adaptive_executor_pool_size config - - * Adds support for insert into local table select from distributed table - - * Adds support to partially push down tdigest aggregates - - * Adds support for receiving binary encoded results from workers using - citus.enable_binary_protocol - - * Enables joins between local tables and CTEs - - * Adds showing query text in EXPLAIN output when explain verbose is true - - * Adds support for showing CTE statistics in EXPLAIN ANALYZE - - * Adds support for showing amount of data received in EXPLAIN ANALYZE - - * Introduces downgrade paths in migration scripts - - * Avoids returning incorrect results when changing roles in a transaction - - * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug - - * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug with lists of configuration values in ALTER ROLE SET statements - - * Fixes a bug that occurs when coordinator is added as a worker node - - * Fixes a crash because of overflow in partition id with certain compile flags - - * Fixes a crash that may happen if no worker nodes are added - - * Fixes a crash that occurs when inserting implicitly coerced constants - - * Fixes a crash when aggregating empty tables - - * Fixes a memory leak in subtransaction memory handling - - * Fixes crash when using rollback to savepoint after cancellation of DML - - * Fixes deparsing for queries with anonymous column references - - * Fixes distribution of composite types failing to include typemods - - * Fixes explain analyze on adaptive executor repartitions - - * Fixes possible error throwing in abort handle - - * Fixes segfault when evaluating func calls with default params on coordinator - - * Fixes several EXPLAIN ANALYZE issues - - * Fixes write queries with const expressions and COLLATE in various places - - * Fixes wrong cancellation message about distributed deadlocks - - * Reports correct INSERT/SELECT method in EXPLAIN - - * Disallows triggers on citus tables - - -- Onur Tirtir Tue, 28 Jul 2020 13:22:31 +0000 - -citus (9.3.5.citus-1) stable; urgency=low - - * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug - - * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug - - * Fixes a crash because of overflow in partition id with certain compile flags - - -- Onur Tirtir Mon, 27 Jul 2020 7:28:18 +0000 - -citus (9.3.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug with lists of configuration values in ALTER ROLE SET statements - - * Fixes deparsing for queries with anonymous column references - - -- Onur Tirtir Wed, 22 Jul 2020 9:00:01 +0000 - -citus (9.3.3.citus-1) stable; urgency=low - - * Fixes a memory leak in subtransaction memory handling - - -- Onur Tirtir Mon, 13 Jul 2020 8:47:40 +0000 - -citus (9.3.0.citus-1) stable; urgency=low - - * Adds max_shared_pool_size to control number of connections across sessions - - * Adds support for window functions on coordinator - - * Improves shard pruning logic to understand OR-conditions - - * Prevents using an extra connection for intermediate result multi-casts - - * Adds propagation of ALTER ROLE .. 
SET statements - - * Adds update_distributed_table_colocation UDF to update colocation of tables - - * Introduces a UDF to truncate local data after distributing a table - - * Adds support for creating temp schemas in parallel - - * Adds support for evaluation of nextval in the target list on coordinator - - * Adds support for local execution of COPY/TRUNCATE/DROP/DDL commands - - * Adds support for local execution of shard creation - - * Uses local execution in a transaction block - - * Adds support for querying distributed table sizes concurrently - - * Allows master_copy_shard_placement to replicate placements to new nodes - - * Allows table type to be used in target list - - * Avoids having multiple maintenance daemons active for a single database - - * Defers reference table replication to shard creation time - - * Enables joins between local tables and reference tables in transaction blocks - - * Ignores pruned target list entries in coordinator plan - - * Improves SIGTERM handling of maintenance daemon - - * Increases the default of citus.node_connection_timeout to 30 seconds - - * Fixes a bug that occurs when creating remote tasks in local execution - - * Fixes a bug that causes some DML queries containing aggregates to fail - - * Fixes a bug that could cause failures in queries with subqueries or CTEs - - * Fixes a bug that may cause some connection failures to throw errors - - * Fixes a bug which caused queries with SRFs and function evalution to fail - - * Fixes a bug with generated columns when executing COPY dist_table TO file - - * Fixes a crash when using non-constant limit clauses - - * Fixes a failure when composite types used in prepared statements - - * Fixes a possible segfault when dropping dist. table in a transaction block - - * Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING - - * Fixes a segfault when executing queries using GROUPING - - * Fixes an error when using LEFT JOIN with GROUP BY on primary key - - * Fixes an issue with distributing tables having generated cols not at the end - - * Fixes automatic SSL permission issue when using "initdb --allow-group-access" - - * Fixes errors which could occur when subqueries are parameters to aggregates - - * Fixes possible issues by invalidating the plan cache in master_update_node - - * Fixes timing issues which could be caused by changing system clock - - -- Onur Tirtir Thu, 7 May 2020 15:11:25 +0000 - -citus (9.2.4.citus-1) stable; urgency=low - - * Fixes a release problem in 9.2.3 - - -- Onur Tirtir Tue, 31 Mar 2020 08:06:59 +0000 - -citus (9.2.3.citus-1) stable; urgency=low - - * Do not use C functions that have been banned by Microsoft - - * Fixes a bug that causes wrong results with complex outer joins - - * Fixes issues found using static analysis - - * Fixes left join shard pruning in pushdown planner - - * Fixes possibility for segmentation fault in internal aggregate functions - - * Fixes possible segfault when non pushdownable aggregates are used in HAVING - - * Improves correctness of planning subqueries in HAVING - - * Prevents using old connections for security if citus.node_conninfo changed - - * Uses Microsoft approved cipher string for default TLS setup - - -- Onur Tirtir Thu, 26 Mar 2020 8:22:48 +0000 - -citus (9.0.2.citus-1) stable; urgency=low - - * Fixes build errors on EL/OL 6 based distros - - * Fixes a bug that caused maintenance daemon to fail on standby nodes - - * Disallows distributed function creation when replication_model is `statement` - - -- Onur Tirtir 
Fri, 6 Mar 2020 14:10:16 +0000 - -citus (9.2.2.citus-1) stable; urgency=low - - * Fixes a bug that caused some prepared stmts with function calls to fail - - * Fixes a bug that caused some prepared stmts with composite types to fail - - * Fixes a bug that caused missing subplan results in workers - - * Improves performance of re-partition joins - - -- Onur Tirtir Fri, 6 Mar 2020 07:14:20 +0000 - -citus (9.2.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause crashes if distribution key is NULL - - -- Onur Tirtir Fri, 14 Feb 2020 11:51:09 +0000 - -citus (9.2.0.citus-1) stable; urgency=low - - * Adds support for INSERT...SELECT queries with re-partitioning - - * Adds citus.coordinator_aggregation_strategy to support more aggregates - - * Adds caching of local plans on shards for Citus MX - - * Adds compatibility support for dist. object infrastructure from old versions - - * Adds defering shard-pruning for fast-path router queries to execution - - * Adds propagation of GRANT ... ON SCHEMA queries - - * Adds support for CTE pushdown via CTE inlining in distributed planning - - * Adds support for ALTER TABLE ... SET SCHEMA propagation. - - * Adds support for DROP ROUTINE & ALTER ROUTINE commands - - * Adds support for any inner join on a reference table - - * Changes citus.log_remote_commands level to NOTICE - - * Disallows marking ref. table shards unhealthy in the presence of savepoints - - * Disallows placing new shards with shards in TO_DELETE state - - * Enables local execution of queries that do not need any data access - - * Fixes Makefile trying to cleanup PG directory during install - - * Fixes a bug causing errors when planning a query with multiple subqueries - - * Fixes a possible deadlock that could happen during shard moves - - * Fixes a problem when adding a new node due to tables referenced in func body - - * Fixes an issue that could cause joins with reference tables to be slow - - * Fixes cached metadata for shard is inconsistent issue - - * Fixes inserting multiple composite types as partition key in VALUES - - * Fixes unnecessary repartition on joins with more than 4 tables - - * Prevents wrong results for replicated partitioned tables after failure - - * Restricts LIMIT approximation for non-commutative aggregates - - -- Onur Tirtir Wed, 10 Feb 2020 8:48:00 +0000 - -citus (9.1.1.citus-1) stable; urgency=low - - * Fixes a bug causing SQL-executing UDFs to crash when passing in DDL - - * Fixes a bug that caused column_to_column_name to crash for invalid input - - * Fixes a bug that caused inserts into local tables w/ dist. 
subqueries to crash - - * Fixes a bug that caused some noop DML statements to fail - - * Fixes a bug that prevents dropping reference table columns - - * Fixes a crash in IN (.., NULL) queries - - * Fixes a crash when calling a distributed function from PL/pgSQL - - * Fixes an issue that caused CTEs to sometimes leak connections - - * Fixes strange errors in DML with unreachable sublinks - - * Prevents statements in SQL functions to run outside of a transaction - - -- Onur Tirtir Wed, 18 Dec 2019 14:32:42 +0000 - -citus (9.1.0.citus-1) stable; urgency=low - - * Adds extensions to distributed object propagation infrastructure - - * Adds support for ALTER ROLE propagation - - * Adds support for aggregates in create_distributed_function - - * Adds support for expressions in reference joins - - * Adds support for returning RECORD in multi-shard queries - - * Adds support for simple IN subqueries on unique cols in repartition joins - - * Adds support for subqueries in HAVING clauses - - * Automatically distributes unary aggs w/ combinefunc and non-internal stype - - * Disallows distributed func creation when replication_model is 'statement' - - * Drops support for deprecated real-time and router executors - - * Fixes a bug in local execution that could cause missing rows in RETURNING - - * Fixes a bug that caused maintenance daemon to fail on standby nodes - - * Fixes a bug that caused other CREATE EXTENSION commands to take longer - - * Fixes a bug that prevented REFRESH MATERIALIZED VIEW - - * Fixes a bug when view is used in modify statements - - * Fixes a memory leak in adaptive executor when query returns many columns - - * Fixes underflow init of default values in worker extended op node creation - - * Fixes potential segfault in standard_planner inlining functions - - * Fixes an issue that caused failures in RHEL 6 builds - - * Fixes queries with repartition joins and group by unique column - - * Improves CTE/Subquery performance by pruning intermediate rslt broadcasting - - * Removes citus.worker_list_file GUC - - * Revokes usage from the citus schema from public - - -- Onur Tirtir Thu, 28 Nov 2019 15:11:05 +0000 - -citus (9.0.1.citus-1) stable; urgency=low - - * Fixes a memory leak in the executor - - * Revokes usage from the citus schema from public - - -- Hanefi Onaldi Wed, 30 Oct 2019 8:53:22 +0000 - -citus (9.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 12 - - * Adds UDFs to help with PostgreSQL upgrades - - * Distributes types to worker nodes - - * Introduces create_distributed_function UDF - - * Introduces local query execution for Citus MX - - * Implements infrastructure for routing CALL to MX workers - - * Implements infrastructure for routing SELECT function() to MX workers - - * Adds support for foreign key constraints between reference tables - - * Adds a feature flag to turn off CREATE TYPE propagation - - * Adds option citus.single_shard_commit_protocol - - * Adds support for EXPLAIN SUMMARY - - * Adds support for GENERATE ALWAYS AS STORED - - * Adds support for serial and smallserial in MX mode - - * Adds support for anon composite types on the target list in router queries - - * Avoids race condition between create_reference_table & master_add_node - - * Fixes a bug in schemas of distributed sequence definitions - - * Fixes a bug that caused run_command_on_colocated_placements to fail - - * Fixes a bug that leads to various issues when a connection is lost - - * Fixes a schema leak on CREATE INDEX statement - - * Fixes assert failure in bare SELECT 
FROM reference table FOR UPDATE in MX - - * Makes master_update_node MX compatible - - * Prevents pg_dist_colocation from multiple records for reference tables - - * Prevents segfault in worker_partition_protocol edgecase - - * Propagates ALTER FUNCTION statements for distributed functions - - * Propagates CREATE OR REPLACE FUNCTION for distributed functions - - * Propagates REINDEX on tables & indexes - - * Provides a GUC to turn of the new dependency propagation functionality - - * Uses 2PC in adaptive executor when dealing with replication factors above 1 - - -- Hanefi Onaldi Tue, 15 Oct 2019 16:54:50 +0000 - -citus (8.3.2.citus-1) stable; urgency=low - - * Fixes performance issues by skipping unnecessary relation access recordings - - -- Hanefi Onaldi Fri, 9 Aug 2019 11:15:57 +0000 - -citus (8.3.1.citus-1) stable; urgency=low - - * Improves Adaptive Executor performance - - -- Hanefi Onaldi Mon, 29 Jul 2019 10:25:50 +0000 - -citus (8.3.0.citus-1) stable; urgency=low - - * Adds a new distributed executor: Adaptive Executor - - * citus.enable_statistics_collection defaults to off (opt-in) - - * Adds support for CTEs in router planner for modification queries - - * Adds support for propagating SET LOCAL at xact start - - * Adds option to force master_update_node during failover - - * Deprecates master_modify_multiple_shards - - * Improves round robin logic on router queries - - * Creates all distributed schemas as superuser on a separate connection - - * Makes COPY adapt to connection use behaviour of previous commands - - * Replaces SESSION_LIFESPAN with configurable no. of connections at xact end - - * Propagates ALTER FOREIGN TABLE commands to workers - - * Don't schedule tasks on inactive nodes - - * Makes DROP/VALIDATE CONSTRAINT tolerant of ambiguous shard extension - - * Fixes an issue with subquery map merge jobs as non-root - - * Fixes null pointers caused by partial initialization of ConnParamsHashEntry - - * Fixes errors caused by joins with shadowed aliases - - * Fixes a regression in outer joining subqueries introduced in 8.2.0 - - * Fixes a crash that can occur under high memory load - - * Fixes a bug that selects wrong worker when using round-robin assignment - - * Fixes savepoint rollback after multi-shard modify/copy failure - - * Fixes bad foreign constraint name search - - * Fixes a bug that prevents stack size to be adjusted - - -- Hanefi Onaldi Wed, 10 Jul 2019 15:19:02 +0000 - -citus (8.2.2.citus-1) stable; urgency=low - - * Fixes a bug in outer joins wrapped in subqueries - - -- Burak Velioglu Wed, 12 Jun 2019 8:45:08 +0000 - -citus (8.2.1.citus-1) stable; urgency=low - - * Fixes a bug that prevents stack size to be adjusted - - -- Burak Velioglu Wed, 3 Apr 2019 20:56:47 +0000 - -citus (8.1.2.citus-1) stable; urgency=low - - * Don't do redundant ALTER TABLE consistency checks at coordinator - - * Fixes a bug that prevents stack size to be adjusted - - * Fix an issue with some DECLARE .. 
CURSOR WITH HOLD commands - - -- Burak Velioglu Wed, 3 Apr 2019 20:34:46 +0000 - -citus (8.2.0.citus-1) stable; urgency=low - - * Removes support and code for PostgreSQL 9.6 - - * Enable more outer joins with reference tables - - * Execute CREATE INDEX CONCURRENTLY in parallel - - * Treat functions as transaction blocks - - * Add support for column aliases on join clauses - - * Skip standard_planner() for trivial queries - - * Added support for function calls in joins - - * Round-robin task assignment policy relies on local transaction id - - * Relax subquery union pushdown restrictions for reference tables - - * Speed-up run_command_on_shards() - - * Address some memory issues in connection config - - * Restrict visibility of get_*_active_transactions functions to pg_monitor - - * Don't do redundant ALTER TABLE consistency checks at coordinator - - * Queries with only intermediate results do not rely on task assignment policy - - * Finish connection establishment in parallel for multiple connections - - * Fixes a bug related to pruning shards using a coerced value - - * Fix an issue with some DECLARE .. CURSOR WITH HOLD commands - - * Fixes a bug that could lead to infinite recursion during recursive planning - - * Fixes a bug that could prevent planning full outer joins with using clause - - * Fixes a bug that could lead to memory leak on citus_relation_size - - * Fixes a problem that could cause segmentation fault with recursive planning - - * Switch CI solution to CircleCI - - -- Burak Velioglu Fri, 29 Mar 2019 07:36:09 +0000 - -citus (8.0.3.citus-1) stable; urgency=low - - * Fixes maintenance daemon panic due to unreleased spinlock - - * Fixes an issue with having clause when used with complex joins - - -- Hanefi Onaldi Wed, 9 Jan 2019 9:50:07 +0000 - -citus (8.1.1.citus-1) stable; urgency=low - - * Fixes maintenance daemon panic due to unreleased spinlock - - * Fixes an issue with having clause when used with complex joins - - -- Hanefi Onaldi Mon, 7 Jan 2019 16:26:13 +0000 - -citus (8.1.0.citus-1) stable; urgency=low - - * Turns on ssl by default for new installations of citus - - * Restricts SSL Ciphers to TLS1.2 and above - - * Adds support for INSERT INTO SELECT..ON CONFLICT/RETURNING via coordinator - - * Adds support for round-robin task assignment for queries to reference tables - - * Adds support for SQL tasks using worker_execute_sql_task UDF with task-tracker - - * Adds support for VALIDATE CONSTRAINT queries - - * Adds support for disabling hash aggregate with HLL - - * Adds user ID suffix to intermediate files generated by task-tracker - - * Only allow transmit from pgsql_job_cache directory - - * Disallows GROUPING SET clauses in subqueries - - * Removes restriction on user-defined group ID in node addition functions - - * Relaxes multi-shard modify locks when enable_deadlock_prevention is disabled - - * Improves security in task-tracker protocol - - * Improves permission checks in internal DROP TABLE functions - - * Improves permission checks in cluster management functions - - * Cleans up UDFs and fixes permission checks - - * Fixes crashes caused by stack size increase under high memory load - - * Fixes a bug that could cause maintenance daemon panic - - -- Burak Velioglu Tue, 18 Dec 2018 15:12:45 +0000 - -citus (8.0.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause maintenance daemon panic - - * Fixes crashes caused by stack size increase under high memory load - - -- Burak Velioglu Thu, 13 Dec 2018 13:56:44 +0000 - -citus (7.5.4.citus-1) stable; 
urgency=low - - * Fixes a bug that could cause maintenance daemon panic - - -- Burak Velioglu Wed, 12 Dec 2018 11:45:24 +0000 - -citus (8.0.1.citus-1) stable; urgency=low - - * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker - - -- Burak Velioglu Wed, 28 Nov 2018 11:38:47 +0000 - -citus (7.5.3.citus-1) stable; urgency=low - - * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker - - -- Burak Velioglu Wed, 28 Nov 2018 10:52:20 +0000 - -citus (7.5.2.citus-1) stable; urgency=low - - * Fixes inconsistent metadata error when shard metadata caching get interrupted - - * Fixes a bug that could cause memory leak - - * Fixes a bug that prevents recovering wrong transactions in MX - - * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load - - * Fixes crashes caused by stack size increase under high memory load - - -- Burak Velioglu Wed, 14 Nov 2018 20:42:16 +0000 - -citus (8.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 11 - - * Adds support for applying DML operations on reference tables from MX nodes - - * Adds distributed locking to truncated MX tables - - * Adds support for running TRUNCATE command from MX worker nodes - - * Adds views to provide insight about the distributed transactions - - * Adds support for TABLESAMPLE in router queries - - * Adds support for INCLUDE option in index creation - - * Adds option to allow simple DML commands from hot standby - - * Adds support for partitioned tables with replication factor > 1 - - * Prevents a deadlock on concurrent DROP TABLE and SELECT on Citus MX - - * Fixes a bug that prevents recovering wrong transactions in MX - - * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load - - * Fixes a bug in MX mode, calling DROP SCHEMA with existing partitioned table - - * Fixes a bug that could cause modifying CTEs to select wrong execution mode - - * Fixes a bug preventing rollback in CREATE PROCEDURE - - * Fixes a bug on not being able to drop index on a partitioned table - - * Fixes a bug on TRUNCATE when there is a foreign key to a reference table - - * Fixes a performance issue in prepared INSERT..SELECT - - * Fixes a bug which causes errors on DROP DATABASE IF EXISTS - - * Fixes a bug to remove intermediate result directory in pull-push execution - - * Improves query pushdown planning performance - - * Evaluate functions anywhere in query - - -- Burak Velioglu Fri, 02 Nov 2018 08:06:42 +0000 - -citus (7.5.1.citus-1) stable; urgency=low - - * Improves query pushdown planning performance - - * Fixes a bug that could cause modifying CTEs to select wrong execution mode - - -- Burak Velioglu Wed, 29 Aug 2018 08:06:42 +0000 - -citus (7.4.2.citus-1) stable; urgency=low - - * Fixes a segfault in real-time executor during online shard move - - -- Mehmet Furkan Sahin Fri, 27 Jul 2018 13:42:27 +0000 - -citus (7.5.0.citus-1) stable; urgency=low - - * Adds foreign key support from hash distributed to reference tables - - * Adds SELECT ... FOR UPDATE support for router plannable queries - - * Adds support for non-partition columns in count distinct - - * Fixes a segfault in real-time executor during online shard move - - * Fixes ALTER TABLE ADD COLUMN constraint check - - * Fixes a bug where INSERT ... SELECT allows one to update dist. 
column - - * Allows DDL commands to be sequentialized via citus.multi_shard_modify_mode - - * Adds support for topn_union_agg and topn_add_agg across shards - - * Adds support for hll_union_agg and hll_add_agg across shards - - * Fixes a bug that might cause shards to have a wrong owner - - * Adds select_opens_transaction_block GUC - - * Adds utils to implement DDLs for policies in future - - * Makes intermediate results to use separate connections - - * Adds a node_conninfo GUC to set outgoing connection settings - - -- Mehmet Furkan Sahin Wed, 25 Jul 2018 9:32:24 +0000 - -citus (6.2.6.citus-1) stable; urgency=low - - * Adds support for respecting enable_hashagg in the master planner - - -- Burak Velioglu Fri, 06 Jul 2018 13:30:08 +0000 - -citus (7.4.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause txns to incorrectly proceed after failure - - * Fixes a bug on INSERT ... SELECT queries in prepared statements - - -- Burak Velioglu Wed, 20 Jun 2018 12:25:30 +0000 - -citus (7.2.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause SELECTs to crash during a rebalance - - -- Burak Velioglu Thu, 17 May 2018 11:51:56 +0000 - -citus (7.4.0.citus-1) stable; urgency=low - - * Adds support for non-pushdownable subqueries and CTEs in UPDATE/DELETE - - * Adds support for pushdownable subqueries and joins in UPDATE/DELETE - - * Adds faster shard pruning for subqueries - - * Adds partitioning support to MX table - - * Adds support for (VACUUM | ANALYZE) VERBOSE - - * Adds support for multiple ANDs in HAVING for pushdown planner - - * Adds support for quotation needy schema names - - * Improves operator check time in physical planner for custom data types - - * Removes broadcast join logic - - * Deprecates large_table_shard_count and master_expire_table_cache() - - * Modifies master_update_node to write-lock shards hosted by node over update - - * DROP TABLE now drops shards as the currrent user instead of the superuser - - * Adds specialised error codes for connection failures - - * Improves error messages on connection failure - - * Fixes issue which prevented multiple citus_table_size calls per query - - * Tests are updated to use create_distributed_table - - -- Burak Velioglu Tue, 15 May 2018 13:01:17 +0000 - -citus (7.3.0.citus-1) stable; urgency=low - - * Adds support for non-colocated joins between subqueries - - * Adds support for window functions that can be pushed down to worker - - * Adds support for modifying CTEs - - * Adds recursive plan for WHERE clause subqueries with recurring FROM clause - - * Adds support for bool_ and bit_ aggregates - - * Adds support for Postgres jsonb and json aggregation functions - - * Adds support for respecting enable_hashagg in the master plan - - * Performance improvements to reduce distributed planning time - - * Fixes a bug on planner when aggregate is used in ORDER BY - - * Fixes a bug on planner when DISTINCT (ON) clause is used with GROUP BY - - * Fixes a planner bug with distinct and aggregate clauses - - * Fixes a bug that opened new connections on each table size function call - - * Fixes a bug canceling backends not involved in distributed deadlocks - - * Fixes count distinct bug on column expressions when used with subqueries - - * Improves error handling on worker node failures - - * Improves error messages for INSERT queries that have subqueries - - -- Burak Velioglu Thu, 15 Mar 2018 14:16:10 +0000 - -citus (7.2.1.citus-1) stable; urgency=low - - * Fixes count distinct bug on column expressions when used with 
subqueries - - * Adds support for respecting enable_hashagg in the master plan - - * Fixes a bug canceling backends not involved in distributed deadlocks - - -- Burak Velioglu Tue, 06 Feb 2018 14:46:07 +0000 - -citus (7.2.0.citus-1) stable; urgency=low - - * Adds support for CTEs - - * Adds support for subqueries that require merge step - - * Adds support for set operations (UNION, INTERSECT, ...) - - * Adds support for 2PC auto-recovery - - * Adds support for querying local tables in CTEs and subqueries - - * Adds support for more SQL coverage in subqueries for reference tables - - * Adds support for count(distinct) in queries with a subquery - - * Adds support for non-equijoins when there is already an equijoin - - * Adds support for real-time executor to run in transaction blocks - - * Adds infrastructure for storing intermediate distributed query results - - * Adds a new GUC named enable_repartition_joins for auto executor switch - - * Adds support for limiting the intermediate result size - - * Improves support for queries with unions containing filters - - * Improves support for queries with unions containing joins - - * Improves support for subqueries in the WHERE clause - - * Increases COPY throughput - - * Enables pushing down queries containing only recurring tuples and GROUP BY - - * Load-balance queries that read from 0 shards - - * Improves support for using functions in subqueries - - * Fixes a bug that causing real-time executor to crash during cancellation - - * Fixes a bug that causing real-time executor to get stuck on cancellation - - * Fixes a bug that could block modification queries unnecessarily - - * Fixes a bug that could cause assigning wrong IDs to transactions - - * Fixes a bug that could cause an assert failure with ANALYZE statements - - * Fixes a bug that would push down wrong set operations in subqueries - - * Fixes a bug that could cause a deadlock in create_distributed_table - - * Fixes a bug that could confuse user about ANALYZE usage - - * Fixes a bug causing false positive distributed deadlock detections - - * Relaxes the locking for DDL commands on partitioned tables - - * Relaxes the locking on COPY with replication - - * Logs more remote commands when citus.log_remote_commands is set - - -- Burak Velioglu Tue, 16 Jan 2018 14:34:20 +0000 - -citus (6.2.5.citus-1) stable; urgency=low - - * Fixes a bug that could crash the coordinator while reporting a remote error - - -- Burak Velioglu Thu, 11 Jan 2018 11:40:28 +0000 - -citus (7.1.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause assigning wrong IDs to transactions - - * Increases COPY throughput - - -- Burak Velioglu Fri, 05 Jan 2018 09:00:07 +0000 - -citus (7.1.1.citus-1) stable; urgency=low - - * Fixes a bug preventing pushing down subqueries with reference tables - - * Fixes a bug that could create false positive distributed deadlocks - - * Fixes a bug that could prevent running concurrent COPY and multi-shard DDL - - * Fixes a bug that could mislead users about ANALYZE queries - - -- Burak Velioglu Tue, 05 Dec 2017 09:00:07 +0000 - -citus (7.1.0.citus-1) stable; urgency=low - - * Adds support for native queries with multi shard UPDATE/DELETE queries - - * Expands reference table support in subquery pushdown - - * Adds window function support for subqueries and INSERT ... 
SELECT queries - - * Adds support for COUNT(DISTINCT) [ON] queries on non-partition columns - - * Adds support for DISTINCT [ON] queries on non-partition columns - - * Introduces basic usage statistic collector - - * Adds support for setting replica identity while creating distributed tables - - * Adds support for ALTER TABLE ... REPLICA IDENTITY queries - - * Adds pushdown support for LIMIT and HAVING grouped by partition key - - * Adds support for INSERT ... SELECT queries via worker nodes on MX clusters - - * Adds support for adding primary key using already defined index - - * Adds replication parameter to shard copy functions - - * Changes shard_name UDF to omit public schema name - - * Adds master_move_node UDF to make changes on nodename/nodeport more easy - - * Fixes a bug that could cause casting error with INSERT ... SELECT queries - - * Fixes a bug that could prevent upgrading servers from Citus 6.1 - - * Fixes a bug that could prevent attaching partitions to a table in schema - - * Fixes a bug preventing adding nodes to clusters with reference tables - - * Fixes a bug that could cause a crash with INSERT ... SELECT queries - - * Fixes a bug that could prevent creating a partitoned table on Cloud - - * Implements various performance improvements - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Velioglu Wed, 15 Nov 2017 09:00:07 +0000 - -citus (7.0.3.citus-1) stable; urgency=low - - * Fixes several bugs that could cause crash - - * Fixes a bug that could cause deadlock while creating reference tables - - * Fixes a bug that could cause false-positives in deadlock detection - - * Fixes a bug that could cause 2PC recovery not to work from MX workers - - * Fixes a bug that could cause cache incohorency - - * Fixes a bug that could cause maintenance daemon to skip cache invalidations - - * Improves performance of transaction recovery by using correct index - - -- Burak Yucesoy Mon, 16 Oct 2017 11:52:07 +0000 - -citus (7.0.2.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 22:29:01 +0000 - -citus (6.2.4.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 21:31:35 +0000 - -citus (6.1.3.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 20:31:35 +0000 - -citus (7.0.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause memory leaks in INSERT ... SELECT queries - - * Fixes a bug that could cause incorrect execution of prepared statements - - * Fixes a bug that could cause excessive memory usage during COPY - - * Incorporates latest changes from core PostgreSQL code - - -- Burak Yucesoy Tue, 12 Sep 2017 17:53:50 +0000 - -citus (7.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 10 - - * Drops support for PostgreSQL 9.5 - - * Adds support for multi-row INSERT - - * Adds support for router UPDATE and DELETE queries with subqueries - - * Adds infrastructure for distributed deadlock detection - - * Deprecates enable_deadlock_prevention flag - - * Adds support for partitioned tables - - * Adds support for creating UNLOGGED tables - - * Adds support for SAVEPOINT - - * Adds UDF citus_create_restore_point for taking distributed snapshots - - * Adds support for evaluating non-pushable INSERT ... 
SELECT queries - - * Adds support for subquery pushdown on reference tables - - * Adds shard pruning support for IN and ANY - - * Adds support for UPDATE and DELETE commands that prune down to 0 shard - - * Enhances transaction support by relaxing some transaction restrictions - - * Fixes a bug causing crash if distributed table has no shards - - * Fixes a bug causing crash when removing inactive node - - * Fixes a bug causing failure during COPY on tables with dropped columns - - * Fixes a bug causing failure during DROP EXTENSION - - * Fixes a bug preventing executing VACUUM and INSERT concurrently - - * Fixes a bug in prepared INSERT statements containing an implicit cast - - * Fixes several issues related to statement cancellations and connections - - * Fixes several 2PC related issues - - * Removes an unnecessary dependency causing warning messages in pg_dump - - * Adds internal infrastructure for follower clusters - - * Adds internal infrastructure for progress tracking - - * Implements various performance improvements - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Mon, 28 Aug 2017 12:27:53 +0000 - -citus (6.2.3.citus-1) stable; urgency=low - - * Fixes a crash during execution of local CREATE INDEX CONCURRENTLY - - * Fixes a bug preventing usage of quoted column names in COPY - - * Fixes a bug in prepared INSERTs with implicit cast in partition column - - * Relaxes locks in VACUUM to ensure concurrent execution with INSERT - - -- Burak Yucesoy Thu, 13 Jul 2017 11:27:17 +0000 - -citus (6.2.2.citus-1) stable; urgency=low - - * Fixes a common cause of deadlocks when repairing tables with foreign keys - - -- Burak Velioglu Wed, 07 Jun 2017 09:42:17 +0000 - -citus (6.1.2.citus-1) stable; urgency=low - - * Fixes a common cause of deadlocks when repairing tables with foreign keys - - -- Jason Petersen Wed, 31 May 2017 16:14:11 +0000 - -citus (6.2.1.citus-1) stable; urgency=low - - * Relaxes version-check logic to avoid breaking non-distributed commands - - -- Jason Petersen Wed, 24 May 2017 22:36:07 +0000 - -citus (6.2.0.citus-1) stable; urgency=low - - * Increases SQL subquery coverage by pushing down more kinds of queries - - * Adds CustomScan API support to allow read-only transactions - - * Adds support for CREATE/DROP INDEX CONCURRENTLY - - * Adds support for ALTER TABLE ... ADD CONSTRAINT - - * Adds support for ALTER TABLE ... 
RENAME COLUMN - - * Adds support for DISABLE/ENABLE TRIGGER ALL - - * Adds support for expressions in the partition column in INSERTs - - * Adds support for query parameters in combination with function evaluation - - * Adds support for creating distributed tables from non-empty local tables - - * Adds UDFs to get size of distributed tables - - * Adds UDFs to add a new node without replicating reference tables - - * Adds checks to prevent running Citus binaries with wrong metadata tables - - * Improves shard pruning performance for range queries - - * Improves planner performance for joins involving co-located tables - - * Improves shard copy performance by creating indexes after copy - - * Improves task-tracker performance by batching several status checks - - * Enables router planner for queries on range partitioned table - - * Changes TRUNCATE to drop local data only if enable_ddl_propagation is off - - * Starts to execute DDL on coordinator before workers - - * Fixes a bug causing incorrectly reading invalidated cache - - * Fixes a bug related to creation of schemas in workers with incorrect owner - - * Fixes a bug related to concurrent run of shard drop functions - - * Fixes a bug related to EXPLAIN ANALYZE with DML queries - - * Fixes a bug related to SQL functions in FROM clause - - * Adds a GUC variable to report cross shard queries - - * Fixes a bug related to partition columns without native hash function - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Tue, 16 May 2017 16:05:22 +0000 - -citus (6.1.1.citus-1) stable; urgency=low - - * Fixes a crash caused by router executor use after connection timeouts - - * Fixes a crash caused by relation cache invalidation during COPY - - * Fixes bug related to DDL use within PL/pgSQL functions - - * Fixes a COPY bug related to types lacking binary output functions - - * Fixes a bug related to modifications with parameterized partition values - - * Fixes improper value interpolation in worker sequence generation - - * Guards shard pruning logic against zero-shard tables - - * Fixes possible NULL pointer dereference and buffer underflow, via PVS-Studio - - * Fixes a INSERT...SELECT bug that could push down non-partition column JOINs - - -- Metin Doslu Fri, 5 May 2017 17:42:00 +0000 - -citus (6.1.0.citus-1) stable; urgency=low - - * Implements reference tables, transactionally replicated to all nodes - - * Adds upgrade_to_reference_table UDF to upgrade pre-6.1 reference tables - - * Expands prepared statement support to nearly all statements - - * Adds support for creating VIEWs which reference distributed tables - - * Adds targeted VACUUM/ANALYZE support - - * Adds support for the FILTER clause in aggregate expressions - - * Adds support for function evaluation within INSERT INTO ... 
SELECT - - * Adds support for creating foreign key constraints with ALTER TABLE - - * Adds logic to choose router planner for all queries it supports - - * Enhances create_distributed_table with parameter for explicit colocation - - * Adds generally useful utility UDFs previously available as "Citus Tools" - - * Adds user-facing UDFs for locking shard resources and metadata - - * Refactors connection and transaction management for more consistency - - * Enhances COPY with fully transactional semantics - - * Improves support for cancellation for a number of queries and commands - - * Adds column_to_column_name UDF to help users understand partkey values - - * Adds master_disable_node UDF for temporarily disabling nodes - - * Adds proper MX ("masterless") metadata propagation logic - - * Adds start_metadata_sync_to_node UDF to propagate metadata changes to nodes - - * Enhances SERIAL compatibility with MX tables - - * Adds an node_connection_timeout parameter to set node connection timeouts - - * Adds enable_deadlock_prevention setting to permit multi-node transactions - - * Adds a replication_model setting to specify replication of new tables - - * Changes the shard_replication_factor setting's default value to one - - * Adds code to automatically set max_prepared_transactions if not configured - - * Accelerates lookup of colocated shard placements - - * Fixes a bug affecting INSERT INTO ... SELECT queries using constant values - - * Fixes a bug by ensuring COPY does not mark placements inactive - - * Fixes a bug affecting reads from pg_dist_shard_placement table - - * Fixes a crash triggered by creating a foreign key without a column - - * Fixes a crash related to accessing catalog tables after aborted transaction - - * Fixes a bug affecting JOIN queries requiring repartitions - - * Fixes a bug affecting node insertions to pg_dist_node table - - * Fixes a crash triggered by queries with modifying common table expressions - - * Fixes a bug affecting workloads with concurrent shard appends and deletions - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Thu, 9 Feb 2017 16:17:41 +0000 - -citus (6.0.1.citus-3) stable; urgency=low - - * First build using new versioning practices - - -- Jason Petersen Wed, 8 Feb 2017 23:19:46 +0000 - -citus (6.0.1.citus-2) stable; urgency=low - - * Transitional package to guide users to new package name - - -- Jason Petersen Mon, 6 Feb 2017 16:33:44 +0000 - -citus (6.0.1.citus-1) stable; urgency=low - - * Fixes a bug causing failures during pg_upgrade - - * Fixes a bug preventing DML queries during colocated table creation - - * Fixes a bug that caused NULL parameters to be incorrectly passed as text - - -- Burak Yucesoy Wed, 30 Nov 2016 15:27:38 +0000 - -citus (6.0.0.citus-1) stable; urgency=low - - * Adds compatibility with PostgreSQL 9.6, now the recommended version - - * Removes the pg_worker_list.conf file in favor of a pg_dist_node table - - * Adds master_add_node and master_add_node UDFs to manage membership - - * Removes the \stage command and corresponding csql binary in favor of COPY - - * Removes copy_to_distributed_table in favor of first-class COPY support - - * Adds support for multiple DDL statements within a transaction - - * Adds support for certain foreign key constraints - - * Adds support for parallel INSERT INTO ... 
SELECT against colocated tables - - * Adds support for the TRUNCATE command - - * Adds support for HAVING clauses in SELECT queries - - * Adds support for EXCLUDE constraints which include the partition column - - * Adds support for system columns in queries (tableoid, ctid, etc.) - - * Adds support for relation name extension within INDEX definitions - - * Adds support for no-op UPDATEs of the partition column - - * Adds several general-purpose utility UDFs to aid in Citus maintenance - - * Adds master_expire_table_cache UDF to forcibly expire cached shards - - * Parallelizes the processing of DDL commands which affect distributed tables - - * Adds support for repartition jobs using composite or custom types - - * Enhances object name extension to handle long names and large shard counts - - * Parallelizes the master_modify_multiple_shards UDF - - * Changes distributed table creation to error if target table is not empty - - * Changes the pg_dist_shard.logicalrelid column from an oid to regclass - - * Adds a placementid column to pg_dist_shard_placement, replacing Oid use - - * Removes the pg_dist_shard.shardalias distribution metadata column - - * Adds pg_dist_partition.repmodel to track tables using streaming replication - - * Adds internal infrastructure to take snapshots of distribution metadata - - * Addresses the need to invalidate prepared statements on metadata changes - - * Adds a mark_tables_colocated UDF for denoting pre-6.0 manual colocation - - * Fixes a bug affecting prepared statement execution within PL/pgSQL - - * Fixes a bug affecting COPY commands using composite types - - * Fixes a bug that could cause crashes during EXPLAIN EXECUTE - - * Separates worker and master job temporary folders - - * Eliminates race condition between distributed modification and repair - - * Relaxes the requirement that shard repairs also repair colocated shards - - * Implements internal functions to track which tables' shards are colocated - - * Adds pg_dist_partition.colocationid to track colocation group membership - - * Extends shard copy and move operations to respect colocation settings - - * Adds pg_dist_local_group to prepare for future MX-related changes - - * Adds create_distributed_table to easily create shards and infer colocation - - -- Jason Petersen Tue, 8 Nov 2016 19:45:45 +0000 - -citus (5.2.2.citus-1) stable; urgency=low - - * Adds support for IF NOT EXISTS clause of CREATE INDEX command - - * Adds support for RETURN QUERY and FOR ... 
IN PL/pgSQL features - - * Extends the router planner to handle more queries - - * Changes COUNT of zero-row sets to return 0 rather than an empty result - - * Reduces the minimum permitted task_tracker_delay to a single millisecond - - * Fixes a bug that caused crashes during joins with a WHERE false clause - - * Fixes a bug triggered by unique violation errors raised in long txns - - * Fixes a bug resulting in multiple registration of transaction callbacks - - * Fixes a bug which could result in stale reads of distribution metadata - - * Fixes a bug preventing distributed modifications in some PL/pgSQL functions - - * Fixes some code paths that could hypothetically read uninitialized memory - - * Lowers log level of "waiting for activity" messages - - -- Jason Petersen Tue, 8 Nov 2016 18:43:37 +0000 - -citus (5.2.1.citus-1) stable; urgency=low - - * Fixes subquery pushdown to properly extract outer join qualifiers - - * Addresses possible memory leak during multi-shard transactions - - -- Jason Petersen Tue, 6 Sep 2016 20:47:15 +0000 - -citus (5.2.0.citus-1) stable; urgency=low - - * Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required - - * Adds schema support for tables, named objects (types, operators, etc.) - - * Evaluates non-immutable functions on master in all modification commands - - * Adds support for SERIAL types in non-partition columns - - * Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands - - * Adds support for multi-statement transactions using a fixed set of nodes - - * Full SQL support for SELECT queries which can be executed on single worker - - * Adds option to perform DDL changes using prepared transactions (2PC) - - * Adds an enable_ddl_propagation parameter to control DDL propagation - - * Accelerates shard pruning during merges - - * Adds master_modify_multiple_shards UDF to modify many shards at once - - * Adds COPY support for arrays of user-defined types - - * Now supports parameterized prepared statements for certain use cases - - * Extends LIMIT/OFFSET support to all executor types - - * Constraint violations now fail fast rather than hitting all placements - - * Makes master_create_empty_shard aware of shard placement policy - - * Reduces unnecessary sleep during queries processed by real-time executor - - * Improves task tracker executor's task cleanup logic - - * Relaxes restrictions on cancellation of DDL commands - - * Removes ONLY keyword from worker SELECT queries - - * Error message improvements and standardization - - * Moves master_update_shard_statistics function to pg_catalog schema - - * Fixes a bug where hash-partitioned anti-joins could return bad results - - * Now sets storage type correctly for foreign table-backed shards - - * Fixes master_update_shard_statistics issue with hash-partitioned tables - - * Fixes an issue related to extending table names that require escaping - - * Reduces risk of row counter overflows during modifications - - * Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries - - * Fixes crashes related to partition columns with high attribute numbers - - * Fixes certain subquery and join crashes - - * Detects flex for build even if PostgreSQL was built without it - - * Fixes assert-enabled crash when all_modifications_commutative is true - - -- Jason Petersen Wed, 17 Aug 2016 10:23:21 +0000 - -citus (5.2.0~rc.1-1) testing; urgency=low - - * Release candidate for 5.2. 
- - -- Jason Petersen Mon, 01 Aug 2016 17:01:05 +0000 - -citus (5.1.1-1) stable; urgency=low - - * Adds complex count distinct expression support in repartitioned subqueries - - * Improves task tracker job cleanup logic, addressing a memory leak - - * Fixes bug that generated incorrect results for LEFT JOIN queries - - * Improves compatibility with Debian's reproducible builds project - - * Fixes build issues on FreeBSD platforms - - -- Jason Petersen Fri, 17 Jun 2016 16:20:15 +0000 - -citus (5.1.0-1) stable; urgency=low - - * Adds distributed COPY to rapidly populate distributed tables - - * Adds support for using EXPLAIN on distributed queries - - * Recognizes and fast-paths single-shard SELECT statements automatically - - * Increases INSERT throughput via shard pruning optimizations - - * Improves planner performance for joins involving tables with many shards - - * Adds ability to pass columns as arguments to function calls in UPDATEs - - * Introduces transaction manager for use by multi-shard commands - - * Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables - - * Adds support for some UNIQUE indexes on hash- or range-partitioned tables - - * Deprecates \stage in favor of using COPY for append-partition tables - - * Deprecates copy_to_distributed_table in favor of first-class COPY support - - * Fixes build problems when using non-packaged PostgreSQL installs - - * Fixes bug that sometimes skipped pruning when partitioned by VARCHAR column - - * Fixes bug impeding use of user functions in repartitioned subqueries - - * Fixes bug involving queries with equality comparisons of boolean types - - * Fixes crash that prevented use alongside pg_stat_statements - - * Fixes crash arising from SELECT queries that lack a target list - - * Improves warning and error messages - - -- Jason Petersen Tue, 17 May 2016 16:55:02 +0000 - -citus (5.1.0~rc.2-1) testing; urgency=low - - * Fix EXPLAIN output when FORMAT JSON in use - - -- Jason Petersen Mon, 16 May 2016 11:08:09 +0000 - -citus (5.1.0~rc.1-1) testing; urgency=low - - * Release candidate for 5.1. 
- - -- Jason Petersen Wed, 04 May 2016 19:26:23 +0000 - -citus (5.0.1-1) stable; urgency=low - - * Fixes issues on 32-bit systems - - -- Jason Petersen Fri, 15 Apr 2016 19:17:35 +0000 - -citus (5.0.0-1) stable; urgency=low - - * Initial release - - -- Jason Petersen Thu, 24 Mar 2016 10:12:52 -0400 +citus (10.1.4.citus-1) stable; urgency=low + + * Official 10.1.4 release of Citus + + -- Gurkan Indibay Tue, 01 Feb 2022 11:49:11 +0000 + +citus (10.2.3.citus-1) stable; urgency=low + + * Official 10.2.3 release of Citus + + -- Gurkan Indibay Mon, 29 Nov 2021 11:00:41 +0000 + +citus (10.0.6.citus-1) stable; urgency=low + + * Official 10.0.6 release of Citus + + -- Gurkan Indibay Fri, 12 Nov 2021 11:37:25 +0000 + +citus (9.5.10.citus-1) stable; urgency=low + + * Official 9.5.10 release of Citus + + -- Gurkan Indibay Mon, 08 Nov 2021 14:18:56 +0000 + +citus (9.2.8.citus-1) stable; urgency=low + + * Official 9.2.8 release of Citus + + -- Gurkan Indibay Thu, 04 Nov 2021 12:45:09 +0000 + +citus (9.2.7.citus-1) stable; urgency=low + + * Official 9.2.7 release of Citus + + -- Gurkan Indibay Wed, 03 Nov 2021 09:34:30 +0000 + +citus (10.2.2.citus-1) stable; urgency=low + + * Official 10.2.2 release of Citus + + -- Gurkan Indibay Thu, 14 Oct 2021 13:00:27 +0000 + +citus (10.2.1.citus-1) stable; urgency=low + + * Adds missing version-mismatch checks for columnar tables + + * Adds missing version-mismatch checks for internal functions + + * Fixes a bug that could cause partition shards being not co-located with + parent shards + + * Fixes a bug that prevents pushing down boolean expressions when using + columnar custom scan + + * Fixes a clog lookup failure that could occur when writing to a columnar + table + + * Fixes an issue that could cause unexpected errors when there is an + in-progress write to a columnar table + + * Revokes read access to `columnar.chunk` from unprivileged user + + -- Gurkan Indibay Fri, 24 Sep 2021 12:03:35 +0000 + +citus (10.1.3.citus-1) stable; urgency=low + + * Official 10.1.3 release of Citus + + -- Gurkan Indibay Fri, 17 Sep 2021 17:23:05 +0000 + +citus (10.2.0.citus-1) stable; urgency=low + + * Adds PostgreSQL 14 support + + * Adds hash & btree index support for columnar tables + + * Adds helper UDFs for easy time partition management: + `get_missing_time_partition_ranges`, `create_time_partitions`, and + `drop_old_time_partitions` + + * Adds propagation of ALTER SEQUENCE + + * Adds support for ALTER INDEX ATTACH PARTITION + + * Adds support for CREATE INDEX ON ONLY + + * Allows more graceful failovers when replication factor > 1 + + * Enables chunk group filtering to work with Params for columnar tables + + * Enables qual push down for joins including columnar tables + + * Enables transferring of data using binary encoding by default on PG14 + + * Improves `master_update_table_statistics` and provides distributed deadlock + detection + + * Includes `data_type` and `cache` in sequence definition on worker + + * Makes start/stop_metadata_sync_to_node() transactional + + * Makes sure that table exists before updating table statistics + + * Prevents errors with concurrent `citus_update_table_statistics` and DROP table + + * Reduces memory usage of columnar table scans by freeing the memory used for + last stripe read + + * Shows projected columns for columnar tables in EXPLAIN output + + * Speeds up dropping partitioned tables + + * Synchronizes hasmetadata flag on mx workers + + * Uses current user while syncing metadata + + * Adds a parameter to cleanup metadata when metadata 
syncing is stopped + + * Fixes a bug about int and smallint sequences on MX + + * Fixes a bug that causes partitions to have wrong distribution key after + DROP COLUMN + + * Fixes a bug that caused `worker_append_table_to_shard` to write as superuser + + * Fixes a bug that caused `worker_create_or_alter_role` to crash with NULL input + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that may cause crash while aborting transaction + + * Fixes a bug that prevents attaching partitions when colocated foreign key + exists + + * Fixes a bug with `nextval('seq_name'::text)` + + * Fixes a crash in shard rebalancer when no distributed tables exist + + * Fixes a segfault caused by use after free when using a cached connection + + * Fixes a UNION pushdown issue + + * Fixes a use after free issue that could happen when altering a distributed + table + + * Fixes showing target shard size in the rebalance progress monitor + + -- Gurkan Indibay Thu, 16 Sep 2021 08:45:17 +0000 + +citus (10.1.2.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + -- Gurkan Indibay Tue, 17 Aug 2021 16:25:14 +0000 + +citus (10.0.5.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Tue, 17 Aug 2021 14:42:54 +0000 + +citus (9.5.7.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + * Improves master_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Tue, 17 Aug 2021 13:18:11 +0000 + +citus (9.4.6.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Improves master_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Wed, 11 Aug 2021 08:57:04 +0000 + +citus (10.1.1.citus-1) stable; urgency=low + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + * Fixes showing target shard size in the rebalance progress monitor + + -- Gurkan Indibay Fri, 06 Aug 2021 08:38:37 +0000 + +citus (10.1.0.citus-1) stable; urgency=low + + * Drops support for PostgreSQL 11 + + * Adds `shard_count` parameter to `create_distributed_table` function + + * Adds support for `ALTER DATABASE OWNER` + + * Adds support for temporary columnar tables + + * Adds support for using sequences as column default values when syncing + metadata + + * `alter_columnar_table_set` enforces columnar table option constraints + + * Continues to remove shards after failure in `DropMarkedShards` + + * Deprecates the `citus.replication_model` GUC + + * Enables `citus.defer_drop_after_shard_move` by default + + * Ensures free disk space before moving a shard + + * Fetches shard size on the fly for the rebalance monitor + + * Ignores old placements when disabling or removing a node + + * Implements `improvement_threshold` at shard rebalancer moves + + * Improves orphaned shard cleanup logic + + * Improves performance of `citus_shards` + + * Introduces `citus.local_hostname` GUC for connections to the current node + + * Makes sure connection is 
closed after each shard move + + * Makes sure that target node in shard moves is eligible for shard move + + * Optimizes partitioned disk size calculation for shard rebalancer + + * Prevents connection errors by properly terminating connections + + * Prevents inheriting a distributed table + + * Prevents users from dropping & truncating known shards + + * Pushes down `VALUES` clause as long as not in outer part of a `JOIN` + + * Reduces memory usage for multi-row inserts + + * Reduces memory usage while rebalancing shards + + * Removes length limits around partition names + + * Removes dependencies on the existence of public schema + + * Executor avoids opening extra connections + + * Excludes orphaned shards while finding shard placements + + * Preserves access method of materialized views when undistributing + or altering distributed tables + + * Fixes a bug that allowed moving of shards belonging to a reference table + + * Fixes a bug that can cause a crash when DEBUG4 logging is enabled + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that causes worker_create_or_alter_role to crash with NULL input + + * Fixes a bug where foreign key to reference table was disallowed + + * Fixes a bug with local cached plans on tables with dropped columns + + * Fixes data race in `get_rebalance_progress` + + * Fixes `FROM ONLY` queries on partitioned tables + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + * Fixes error message for local table joins + + * Fixes issues caused by omitting public schema in queries + + * Fixes nested `SELECT` query with `UNION` bug + + * Fixes null relationName bug at parallel execution + + * Fixes possible segfaults when using Citus in the middle of an upgrade + + * Fixes problems with concurrent calls of `DropMarkedShards` + + * Fixes shared dependencies that are not resident in a database + + * Fixes stale hostnames bug in prepared statements after `master_update_node` + + * Fixes the relation size bug during rebalancing + + * Fixes two race conditions in the get_rebalance_progress + + * Fixes using 2PC when it might be necessary + + -- Gurkan Indibay Fri, 16 Jul 2021 15:37:21 +0000 + +citus (10.0.4.citus-1) stable; urgency=low + + * Introduces `citus.local_hostname` GUC for connections to the current node + + * Removes dependencies on the existence of public schema + + * Removes limits around long partition names + + * Fixes a bug that can cause a crash when DEBUG4 logging is enabled + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + * Fixes FROM ONLY queries on partitioned tables + + * Fixes issues caused by public schema being omitted in queries + + * Fixes problems with concurrent calls of DropMarkedShards + + * Fixes relname null bug when using parallel execution + + * Fixes two race conditions in the get_rebalance_progress + + -- Gurkan Indibay Fri, 16 Jul 2021 10:58:55 +0000 + +citus (9.5.6.citus-1) stable; urgency=low + + * Fixes minor bug in `citus_prepare_pg_upgrade` that caused it to lose its + idempotency + + -- Gurkan Fri, 09 Jul 2021 13:30:42 +0000 + +citus (9.4.5.citus-1) stable; urgency=low + + * Adds a configure flag to enforce security + + * Avoids re-using connections for intermediate results + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that might cause self-deadlocks when COPY used in TX block + + * Fixes an issue 
that could cause citus_finish_pg_upgrade to fail + + -- Gurkan Thu, 08 Jul 2021 10:15:23 +0000 + +citus (10.0.3.citus-1) stable; urgency=low + + * Prevents infinite recursion for queries that involve UNION ALL + below `JOIN` + + * Fixes a crash in queries with a modifying CTE and a SELECT + without `FROM` + + * Fixes upgrade and downgrade paths for citus_update_table_statistics + + * Fixes a bug that causes SELECT queries to use 2PC unnecessarily + + * Fixes a bug that might cause self-deadlocks with + `CREATE INDEX` / `REINDEX CONCURRENTLY` commands + + * Adds citus.max_cached_connection_lifetime GUC to set maximum connection + lifetime + + * Adds citus.remote_copy_flush_threshold GUC that controls + per-shard memory usages by `COPY` + + * Adds citus_get_active_worker_nodes UDF to deprecate + `master_get_active_worker_nodes` + + * Skips 2PC for readonly connections in a transaction + + * Makes sure that local execution starts coordinated transaction + + * Removes open temporary file warning when cancelling a query with + an open tuple store + + * Relaxes the locks when adding an existing node + + -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 + +citus (10.0.2.citus-1) stable; urgency=low + + * Adds a configure flag to enforce security + + * Fixes a bug due to cross join without target list + + * Fixes a bug with UNION ALL on PG 13 + + * Fixes a compatibility issue with pg_audit in utility calls + + * Fixes insert query with CTEs/sublinks/subqueries etc + + * Grants SELECT permission on citus_tables view to public + + * Grants SELECT permission on columnar metadata tables to public + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + * Preserves colocation with procedures in alter_distributed_table + + * Prevents using alter_columnar_table_set and alter_columnar_table_reset + on a columnar table not owned by the user + + * Removes limits around long table names + + -- Gurkan Indibay Thu, 4 Mar 2021 2:46:54 +0000 + +citus (9.5.2.citus-1) stable; urgency=low + + * Fixes distributed deadlock detection being blocked by metadata sync + + * Prevents segfaults when SAVEPOINT handling cannot recover from connection + failures + + * Fixes possible issues that might occur with single shard distributed tables + + -- gurkanindibay Wed, 27 Jan 2021 11:25:38 +0000 + +citus (9.4.4.citus-1) stable; urgency=low + + * Fixes a bug that could cause router queries with local tables to be pushed + down + + * Fixes a segfault in connection management due to invalid connection hash + entries + + * Fixes possible issues that might occur with single shard distributed tables + + -- gurkanindibay Tue, 5 Jan 2021 14:58:56 +0000 + +citus (9.5.1.citus-1) stable; urgency=low + + * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE + + * Fixes a bug that could cause excessive memory consumption when a partition is + created + + * Fixes a bug that triggers subplan executions unnecessarily with cursors + + * Fixes a segfault in connection management due to invalid connection hash + entries + + -- Onur Tirtir Wed, 2 Dec 2020 14:28:44 +0000 + +citus (9.4.3.citus-1) stable; urgency=low + + * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE + + * Fixes a bug that triggers subplan executions unnecessarily with cursors + + -- Onur Tirtir Tue, 24 Nov 2020 11:17:57 +0000 + +citus (9.5.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 13 + + * Removes the task-tracker executor + + * Introduces citus local tables + + * Introduces undistribute_table UDF to 
convert tables back to postgres tables + + * Adds support for EXPLAIN (ANALYZE) EXECUTE and EXPLAIN EXECUTE + + * Adds support for EXPLAIN (ANALYZE, WAL) for PG13 + + * Sorts the output of EXPLAIN (ANALYZE) by execution duration. + + * Adds support for CREATE TABLE ... USING table_access_method + + * Adds support for WITH TIES option in SELECT and INSERT SELECT queries + + * Avoids taking multi-shard locks on workers + + * Enforces citus.max_shared_pool_size config in COPY queries + + * Enables custom aggregates with multiple parameters to be executed on workers + + * Enforces citus.max_intermediate_result_size in local execution + + * Improves cost estimation of INSERT SELECT plans + + * Introduces delegation of procedures that read from reference tables + + * Prevents pull-push execution for simple pushdownable subqueries + + * Improves error message when creating a foreign key to a local table + + * Makes citus_prepare_pg_upgrade idempotent by dropping transition tables + + * Disallows ON TRUE outer joins with reference & distributed tables when + reference table is outer relation to avoid incorrect results + + * Disallows field indirection in INSERT/UPDATE queries to avoid incorrect + results + + * Disallows volatile functions in UPDATE subqueries to avoid incorrect results + + * Fixes CREATE INDEX CONCURRENTLY crash with local execution + + * Fixes citus_finish_pg_upgrade to drop all backup tables + + * Fixes a bug that cause failures when RECURSIVE VIEW joined reference table + + * Fixes DROP SEQUENCE failures when metadata syncing is enabled + + * Fixes a bug that caused CREATE TABLE with CHECK constraint to fail + + * Fixes a bug that could cause VACUUM to deadlock + + * Fixes master_update_node failure when no background worker slots are available + + * Fixes a bug that caused replica identity to not be propagated on shard repair + + * Fixes a bug that could cause crashes after connection timeouts + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug that could cause deadlocks on CREATE INDEX + + * Fixes a bug with genetic query optimization in outer joins + + * Fixes a crash when aggregating empty tables + + * Fixes a crash with inserting domain constrained composite types + + * Fixes a crash with multi-row & router INSERT's in local execution + + * Fixes a possibility of doing temporary file cleanup more than once + + * Fixes incorrect setting of join related fields + + * Fixes memory issues around deparsing index commands + + * Fixes reference table access tracking for sequential execution + + * Fixes removal of a single node with only reference tables + + * Fixes sending commands to coordinator when it is added as a worker + + * Fixes write queries with const expressions and COLLATE in various places + + * Fixes wrong cancellation message about distributed deadlock + + -- Onur Tirtir Wed, 11 Nov 2020 15:00:27 +0000 + +citus (9.4.2.citus-1) stable; urgency=low + + * Fixes a bug that could lead to multiple maintenance daemons + + * Fixes an issue preventing views in reference table modifications + + -- Onur Tirtir Thu, 22 Oct 2020 8:53:44 +0000 + +citus (9.4.1.citus-1) stable; urgency=low + + * Fixes EXPLAIN ANALYZE output truncation + + * Fixes a deadlock during transaction recovery + + -- Onur Tirtir Wed, 30 Sep 2020 9:33:46 +0000 + +citus (9.4.0.citus-1) stable; urgency=low + + * Improves COPY by honoring max_adaptive_executor_pool_size config + + * Adds support for insert into local table select from distributed table + + * Adds support to 
partially push down tdigest aggregates + + * Adds support for receiving binary encoded results from workers using + citus.enable_binary_protocol + + * Enables joins between local tables and CTEs + + * Adds showing query text in EXPLAIN output when explain verbose is true + + * Adds support for showing CTE statistics in EXPLAIN ANALYZE + + * Adds support for showing amount of data received in EXPLAIN ANALYZE + + * Introduces downgrade paths in migration scripts + + * Avoids returning incorrect results when changing roles in a transaction + + * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug + + * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug with lists of configuration values in ALTER ROLE SET statements + + * Fixes a bug that occurs when coordinator is added as a worker node + + * Fixes a crash because of overflow in partition id with certain compile flags + + * Fixes a crash that may happen if no worker nodes are added + + * Fixes a crash that occurs when inserting implicitly coerced constants + + * Fixes a crash when aggregating empty tables + + * Fixes a memory leak in subtransaction memory handling + + * Fixes crash when using rollback to savepoint after cancellation of DML + + * Fixes deparsing for queries with anonymous column references + + * Fixes distribution of composite types failing to include typemods + + * Fixes explain analyze on adaptive executor repartitions + + * Fixes possible error throwing in abort handle + + * Fixes segfault when evaluating func calls with default params on coordinator + + * Fixes several EXPLAIN ANALYZE issues + + * Fixes write queries with const expressions and COLLATE in various places + + * Fixes wrong cancellation message about distributed deadlocks + + * Reports correct INSERT/SELECT method in EXPLAIN + + * Disallows triggers on citus tables + + -- Onur Tirtir Tue, 28 Jul 2020 13:22:31 +0000 + +citus (9.3.5.citus-1) stable; urgency=low + + * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug + + * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug + + * Fixes a crash because of overflow in partition id with certain compile flags + + -- Onur Tirtir Mon, 27 Jul 2020 7:28:18 +0000 + +citus (9.3.4.citus-1) stable; urgency=low + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug with lists of configuration values in ALTER ROLE SET statements + + * Fixes deparsing for queries with anonymous column references + + -- Onur Tirtir Wed, 22 Jul 2020 9:00:01 +0000 + +citus (9.3.3.citus-1) stable; urgency=low + + * Fixes a memory leak in subtransaction memory handling + + -- Onur Tirtir Mon, 13 Jul 2020 8:47:40 +0000 + +citus (9.3.0.citus-1) stable; urgency=low + + * Adds max_shared_pool_size to control number of connections across sessions + + * Adds support for window functions on coordinator + + * Improves shard pruning logic to understand OR-conditions + + * Prevents using an extra connection for intermediate result multi-casts + + * Adds propagation of ALTER ROLE .. 
SET statements + + * Adds update_distributed_table_colocation UDF to update colocation of tables + + * Introduces a UDF to truncate local data after distributing a table + + * Adds support for creating temp schemas in parallel + + * Adds support for evaluation of nextval in the target list on coordinator + + * Adds support for local execution of COPY/TRUNCATE/DROP/DDL commands + + * Adds support for local execution of shard creation + + * Uses local execution in a transaction block + + * Adds support for querying distributed table sizes concurrently + + * Allows master_copy_shard_placement to replicate placements to new nodes + + * Allows table type to be used in target list + + * Avoids having multiple maintenance daemons active for a single database + + * Defers reference table replication to shard creation time + + * Enables joins between local tables and reference tables in transaction blocks + + * Ignores pruned target list entries in coordinator plan + + * Improves SIGTERM handling of maintenance daemon + + * Increases the default of citus.node_connection_timeout to 30 seconds + + * Fixes a bug that occurs when creating remote tasks in local execution + + * Fixes a bug that causes some DML queries containing aggregates to fail + + * Fixes a bug that could cause failures in queries with subqueries or CTEs + + * Fixes a bug that may cause some connection failures to throw errors + + * Fixes a bug which caused queries with SRFs and function evaluation to fail + + * Fixes a bug with generated columns when executing COPY dist_table TO file + + * Fixes a crash when using non-constant limit clauses + + * Fixes a failure when composite types used in prepared statements + + * Fixes a possible segfault when dropping dist. table in a transaction block + + * Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING + + * Fixes a segfault when executing queries using GROUPING + + * Fixes an error when using LEFT JOIN with GROUP BY on primary key + + * Fixes an issue with distributing tables having generated cols not at the end + + * Fixes automatic SSL permission issue when using "initdb --allow-group-access" + + * Fixes errors which could occur when subqueries are parameters to aggregates + + * Fixes possible issues by invalidating the plan cache in master_update_node + + * Fixes timing issues which could be caused by changing system clock + + -- Onur Tirtir Thu, 7 May 2020 15:11:25 +0000 + +citus (9.2.4.citus-1) stable; urgency=low + + * Fixes a release problem in 9.2.3 + + -- Onur Tirtir Tue, 31 Mar 2020 08:06:59 +0000 + +citus (9.2.3.citus-1) stable; urgency=low + + * Do not use C functions that have been banned by Microsoft + + * Fixes a bug that causes wrong results with complex outer joins + + * Fixes issues found using static analysis + + * Fixes left join shard pruning in pushdown planner + + * Fixes possibility for segmentation fault in internal aggregate functions + + * Fixes possible segfault when non pushdownable aggregates are used in HAVING + + * Improves correctness of planning subqueries in HAVING + + * Prevents using old connections for security if citus.node_conninfo changed + + * Uses Microsoft approved cipher string for default TLS setup + + -- Onur Tirtir Thu, 26 Mar 2020 8:22:48 +0000 + +citus (9.0.2.citus-1) stable; urgency=low + + * Fixes build errors on EL/OL 6 based distros + + * Fixes a bug that caused maintenance daemon to fail on standby nodes + + * Disallows distributed function creation when replication_model is `statement` + + -- Onur Tirtir 
Fri, 6 Mar 2020 14:10:16 +0000 + +citus (9.2.2.citus-1) stable; urgency=low + + * Fixes a bug that caused some prepared stmts with function calls to fail + + * Fixes a bug that caused some prepared stmts with composite types to fail + + * Fixes a bug that caused missing subplan results in workers + + * Improves performance of re-partition joins + + -- Onur Tirtir Fri, 6 Mar 2020 07:14:20 +0000 + +citus (9.2.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause crashes if distribution key is NULL + + -- Onur Tirtir Fri, 14 Feb 2020 11:51:09 +0000 + +citus (9.2.0.citus-1) stable; urgency=low + + * Adds support for INSERT...SELECT queries with re-partitioning + + * Adds citus.coordinator_aggregation_strategy to support more aggregates + + * Adds caching of local plans on shards for Citus MX + + * Adds compatibility support for dist. object infrastructure from old versions + + * Adds deferring shard-pruning for fast-path router queries to execution + + * Adds propagation of GRANT ... ON SCHEMA queries + + * Adds support for CTE pushdown via CTE inlining in distributed planning + + * Adds support for ALTER TABLE ... SET SCHEMA propagation. + + * Adds support for DROP ROUTINE & ALTER ROUTINE commands + + * Adds support for any inner join on a reference table + + * Changes citus.log_remote_commands level to NOTICE + + * Disallows marking ref. table shards unhealthy in the presence of savepoints + + * Disallows placing new shards with shards in TO_DELETE state + + * Enables local execution of queries that do not need any data access + + * Fixes Makefile trying to cleanup PG directory during install + + * Fixes a bug causing errors when planning a query with multiple subqueries + + * Fixes a possible deadlock that could happen during shard moves + + * Fixes a problem when adding a new node due to tables referenced in func body + + * Fixes an issue that could cause joins with reference tables to be slow + + * Fixes cached metadata for shard is inconsistent issue + + * Fixes inserting multiple composite types as partition key in VALUES + + * Fixes unnecessary repartition on joins with more than 4 tables + + * Prevents wrong results for replicated partitioned tables after failure + + * Restricts LIMIT approximation for non-commutative aggregates + + -- Onur Tirtir Wed, 10 Feb 2020 8:48:00 +0000 + +citus (9.1.1.citus-1) stable; urgency=low + + * Fixes a bug causing SQL-executing UDFs to crash when passing in DDL + + * Fixes a bug that caused column_to_column_name to crash for invalid input + + * Fixes a bug that caused inserts into local tables w/ dist. 
subqueries to crash + + * Fixes a bug that caused some noop DML statements to fail + + * Fixes a bug that prevents dropping reference table columns + + * Fixes a crash in IN (.., NULL) queries + + * Fixes a crash when calling a distributed function from PL/pgSQL + + * Fixes an issue that caused CTEs to sometimes leak connections + + * Fixes strange errors in DML with unreachable sublinks + + * Prevents statements in SQL functions to run outside of a transaction + + -- Onur Tirtir Wed, 18 Dec 2019 14:32:42 +0000 + +citus (9.1.0.citus-1) stable; urgency=low + + * Adds extensions to distributed object propagation infrastructure + + * Adds support for ALTER ROLE propagation + + * Adds support for aggregates in create_distributed_function + + * Adds support for expressions in reference joins + + * Adds support for returning RECORD in multi-shard queries + + * Adds support for simple IN subqueries on unique cols in repartition joins + + * Adds support for subqueries in HAVING clauses + + * Automatically distributes unary aggs w/ combinefunc and non-internal stype + + * Disallows distributed func creation when replication_model is 'statement' + + * Drops support for deprecated real-time and router executors + + * Fixes a bug in local execution that could cause missing rows in RETURNING + + * Fixes a bug that caused maintenance daemon to fail on standby nodes + + * Fixes a bug that caused other CREATE EXTENSION commands to take longer + + * Fixes a bug that prevented REFRESH MATERIALIZED VIEW + + * Fixes a bug when view is used in modify statements + + * Fixes a memory leak in adaptive executor when query returns many columns + + * Fixes underflow init of default values in worker extended op node creation + + * Fixes potential segfault in standard_planner inlining functions + + * Fixes an issue that caused failures in RHEL 6 builds + + * Fixes queries with repartition joins and group by unique column + + * Improves CTE/Subquery performance by pruning intermediate rslt broadcasting + + * Removes citus.worker_list_file GUC + + * Revokes usage from the citus schema from public + + -- Onur Tirtir Thu, 28 Nov 2019 15:11:05 +0000 + +citus (9.0.1.citus-1) stable; urgency=low + + * Fixes a memory leak in the executor + + * Revokes usage from the citus schema from public + + -- Hanefi Onaldi Wed, 30 Oct 2019 8:53:22 +0000 + +citus (9.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 12 + + * Adds UDFs to help with PostgreSQL upgrades + + * Distributes types to worker nodes + + * Introduces create_distributed_function UDF + + * Introduces local query execution for Citus MX + + * Implements infrastructure for routing CALL to MX workers + + * Implements infrastructure for routing SELECT function() to MX workers + + * Adds support for foreign key constraints between reference tables + + * Adds a feature flag to turn off CREATE TYPE propagation + + * Adds option citus.single_shard_commit_protocol + + * Adds support for EXPLAIN SUMMARY + + * Adds support for GENERATE ALWAYS AS STORED + + * Adds support for serial and smallserial in MX mode + + * Adds support for anon composite types on the target list in router queries + + * Avoids race condition between create_reference_table & master_add_node + + * Fixes a bug in schemas of distributed sequence definitions + + * Fixes a bug that caused run_command_on_colocated_placements to fail + + * Fixes a bug that leads to various issues when a connection is lost + + * Fixes a schema leak on CREATE INDEX statement + + * Fixes assert failure in bare SELECT 
FROM reference table FOR UPDATE in MX + + * Makes master_update_node MX compatible + + * Prevents pg_dist_colocation from multiple records for reference tables + + * Prevents segfault in worker_partition_protocol edgecase + + * Propagates ALTER FUNCTION statements for distributed functions + + * Propagates CREATE OR REPLACE FUNCTION for distributed functions + + * Propagates REINDEX on tables & indexes + + * Provides a GUC to turn off the new dependency propagation functionality + + * Uses 2PC in adaptive executor when dealing with replication factors above 1 + + -- Hanefi Onaldi Tue, 15 Oct 2019 16:54:50 +0000 + +citus (8.3.2.citus-1) stable; urgency=low + + * Fixes performance issues by skipping unnecessary relation access recordings + + -- Hanefi Onaldi Fri, 9 Aug 2019 11:15:57 +0000 + +citus (8.3.1.citus-1) stable; urgency=low + + * Improves Adaptive Executor performance + + -- Hanefi Onaldi Mon, 29 Jul 2019 10:25:50 +0000 + +citus (8.3.0.citus-1) stable; urgency=low + + * Adds a new distributed executor: Adaptive Executor + + * citus.enable_statistics_collection defaults to off (opt-in) + + * Adds support for CTEs in router planner for modification queries + + * Adds support for propagating SET LOCAL at xact start + + * Adds option to force master_update_node during failover + + * Deprecates master_modify_multiple_shards + + * Improves round robin logic on router queries + + * Creates all distributed schemas as superuser on a separate connection + + * Makes COPY adapt to connection use behaviour of previous commands + + * Replaces SESSION_LIFESPAN with configurable no. of connections at xact end + + * Propagates ALTER FOREIGN TABLE commands to workers + + * Don't schedule tasks on inactive nodes + + * Makes DROP/VALIDATE CONSTRAINT tolerant of ambiguous shard extension + + * Fixes an issue with subquery map merge jobs as non-root + + * Fixes null pointers caused by partial initialization of ConnParamsHashEntry + + * Fixes errors caused by joins with shadowed aliases + + * Fixes a regression in outer joining subqueries introduced in 8.2.0 + + * Fixes a crash that can occur under high memory load + + * Fixes a bug that selects wrong worker when using round-robin assignment + + * Fixes savepoint rollback after multi-shard modify/copy failure + + * Fixes bad foreign constraint name search + + * Fixes a bug that prevents stack size to be adjusted + + -- Hanefi Onaldi Wed, 10 Jul 2019 15:19:02 +0000 + +citus (8.2.2.citus-1) stable; urgency=low + + * Fixes a bug in outer joins wrapped in subqueries + + -- Burak Velioglu Wed, 12 Jun 2019 8:45:08 +0000 + +citus (8.2.1.citus-1) stable; urgency=low + + * Fixes a bug that prevents stack size to be adjusted + + -- Burak Velioglu Wed, 3 Apr 2019 20:56:47 +0000 + +citus (8.1.2.citus-1) stable; urgency=low + + * Don't do redundant ALTER TABLE consistency checks at coordinator + + * Fixes a bug that prevents stack size to be adjusted + + * Fix an issue with some DECLARE .. 
CURSOR WITH HOLD commands + + -- Burak Velioglu Wed, 3 Apr 2019 20:34:46 +0000 + +citus (8.2.0.citus-1) stable; urgency=low + + * Removes support and code for PostgreSQL 9.6 + + * Enable more outer joins with reference tables + + * Execute CREATE INDEX CONCURRENTLY in parallel + + * Treat functions as transaction blocks + + * Add support for column aliases on join clauses + + * Skip standard_planner() for trivial queries + + * Added support for function calls in joins + + * Round-robin task assignment policy relies on local transaction id + + * Relax subquery union pushdown restrictions for reference tables + + * Speed-up run_command_on_shards() + + * Address some memory issues in connection config + + * Restrict visibility of get_*_active_transactions functions to pg_monitor + + * Don't do redundant ALTER TABLE consistency checks at coordinator + + * Queries with only intermediate results do not rely on task assignment policy + + * Finish connection establishment in parallel for multiple connections + + * Fixes a bug related to pruning shards using a coerced value + + * Fix an issue with some DECLARE .. CURSOR WITH HOLD commands + + * Fixes a bug that could lead to infinite recursion during recursive planning + + * Fixes a bug that could prevent planning full outer joins with using clause + + * Fixes a bug that could lead to memory leak on citus_relation_size + + * Fixes a problem that could cause segmentation fault with recursive planning + + * Switch CI solution to CircleCI + + -- Burak Velioglu Fri, 29 Mar 2019 07:36:09 +0000 + +citus (8.0.3.citus-1) stable; urgency=low + + * Fixes maintenance daemon panic due to unreleased spinlock + + * Fixes an issue with having clause when used with complex joins + + -- Hanefi Onaldi Wed, 9 Jan 2019 9:50:07 +0000 + +citus (8.1.1.citus-1) stable; urgency=low + + * Fixes maintenance daemon panic due to unreleased spinlock + + * Fixes an issue with having clause when used with complex joins + + -- Hanefi Onaldi Mon, 7 Jan 2019 16:26:13 +0000 + +citus (8.1.0.citus-1) stable; urgency=low + + * Turns on ssl by default for new installations of citus + + * Restricts SSL Ciphers to TLS1.2 and above + + * Adds support for INSERT INTO SELECT..ON CONFLICT/RETURNING via coordinator + + * Adds support for round-robin task assignment for queries to reference tables + + * Adds support for SQL tasks using worker_execute_sql_task UDF with task-tracker + + * Adds support for VALIDATE CONSTRAINT queries + + * Adds support for disabling hash aggregate with HLL + + * Adds user ID suffix to intermediate files generated by task-tracker + + * Only allow transmit from pgsql_job_cache directory + + * Disallows GROUPING SET clauses in subqueries + + * Removes restriction on user-defined group ID in node addition functions + + * Relaxes multi-shard modify locks when enable_deadlock_prevention is disabled + + * Improves security in task-tracker protocol + + * Improves permission checks in internal DROP TABLE functions + + * Improves permission checks in cluster management functions + + * Cleans up UDFs and fixes permission checks + + * Fixes crashes caused by stack size increase under high memory load + + * Fixes a bug that could cause maintenance daemon panic + + -- Burak Velioglu Tue, 18 Dec 2018 15:12:45 +0000 + +citus (8.0.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause maintenance daemon panic + + * Fixes crashes caused by stack size increase under high memory load + + -- Burak Velioglu Thu, 13 Dec 2018 13:56:44 +0000 + +citus (7.5.4.citus-1) stable; 
urgency=low + + * Fixes a bug that could cause maintenance daemon panic + + -- Burak Velioglu Wed, 12 Dec 2018 11:45:24 +0000 + +citus (8.0.1.citus-1) stable; urgency=low + + * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker + + -- Burak Velioglu Wed, 28 Nov 2018 11:38:47 +0000 + +citus (7.5.3.citus-1) stable; urgency=low + + * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker + + -- Burak Velioglu Wed, 28 Nov 2018 10:52:20 +0000 + +citus (7.5.2.citus-1) stable; urgency=low + + * Fixes inconsistent metadata error when shard metadata caching get interrupted + + * Fixes a bug that could cause memory leak + + * Fixes a bug that prevents recovering wrong transactions in MX + + * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load + + * Fixes crashes caused by stack size increase under high memory load + + -- Burak Velioglu Wed, 14 Nov 2018 20:42:16 +0000 + +citus (8.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 11 + + * Adds support for applying DML operations on reference tables from MX nodes + + * Adds distributed locking to truncated MX tables + + * Adds support for running TRUNCATE command from MX worker nodes + + * Adds views to provide insight about the distributed transactions + + * Adds support for TABLESAMPLE in router queries + + * Adds support for INCLUDE option in index creation + + * Adds option to allow simple DML commands from hot standby + + * Adds support for partitioned tables with replication factor > 1 + + * Prevents a deadlock on concurrent DROP TABLE and SELECT on Citus MX + + * Fixes a bug that prevents recovering wrong transactions in MX + + * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load + + * Fixes a bug in MX mode, calling DROP SCHEMA with existing partitioned table + + * Fixes a bug that could cause modifying CTEs to select wrong execution mode + + * Fixes a bug preventing rollback in CREATE PROCEDURE + + * Fixes a bug on not being able to drop index on a partitioned table + + * Fixes a bug on TRUNCATE when there is a foreign key to a reference table + + * Fixes a performance issue in prepared INSERT..SELECT + + * Fixes a bug which causes errors on DROP DATABASE IF EXISTS + + * Fixes a bug to remove intermediate result directory in pull-push execution + + * Improves query pushdown planning performance + + * Evaluate functions anywhere in query + + -- Burak Velioglu Fri, 02 Nov 2018 08:06:42 +0000 + +citus (7.5.1.citus-1) stable; urgency=low + + * Improves query pushdown planning performance + + * Fixes a bug that could cause modifying CTEs to select wrong execution mode + + -- Burak Velioglu Wed, 29 Aug 2018 08:06:42 +0000 + +citus (7.4.2.citus-1) stable; urgency=low + + * Fixes a segfault in real-time executor during online shard move + + -- Mehmet Furkan Sahin Fri, 27 Jul 2018 13:42:27 +0000 + +citus (7.5.0.citus-1) stable; urgency=low + + * Adds foreign key support from hash distributed to reference tables + + * Adds SELECT ... FOR UPDATE support for router plannable queries + + * Adds support for non-partition columns in count distinct + + * Fixes a segfault in real-time executor during online shard move + + * Fixes ALTER TABLE ADD COLUMN constraint check + + * Fixes a bug where INSERT ... SELECT allows one to update dist. 
column + + * Allows DDL commands to be sequentialized via citus.multi_shard_modify_mode + + * Adds support for topn_union_agg and topn_add_agg across shards + + * Adds support for hll_union_agg and hll_add_agg across shards + + * Fixes a bug that might cause shards to have a wrong owner + + * Adds select_opens_transaction_block GUC + + * Adds utils to implement DDLs for policies in future + + * Makes intermediate results to use separate connections + + * Adds a node_conninfo GUC to set outgoing connection settings + + -- Mehmet Furkan Sahin Wed, 25 Jul 2018 9:32:24 +0000 + +citus (6.2.6.citus-1) stable; urgency=low + + * Adds support for respecting enable_hashagg in the master planner + + -- Burak Velioglu Fri, 06 Jul 2018 13:30:08 +0000 + +citus (7.4.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause txns to incorrectly proceed after failure + + * Fixes a bug on INSERT ... SELECT queries in prepared statements + + -- Burak Velioglu Wed, 20 Jun 2018 12:25:30 +0000 + +citus (7.2.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause SELECTs to crash during a rebalance + + -- Burak Velioglu Thu, 17 May 2018 11:51:56 +0000 + +citus (7.4.0.citus-1) stable; urgency=low + + * Adds support for non-pushdownable subqueries and CTEs in UPDATE/DELETE + + * Adds support for pushdownable subqueries and joins in UPDATE/DELETE + + * Adds faster shard pruning for subqueries + + * Adds partitioning support to MX table + + * Adds support for (VACUUM | ANALYZE) VERBOSE + + * Adds support for multiple ANDs in HAVING for pushdown planner + + * Adds support for quotation needy schema names + + * Improves operator check time in physical planner for custom data types + + * Removes broadcast join logic + + * Deprecates large_table_shard_count and master_expire_table_cache() + + * Modifies master_update_node to write-lock shards hosted by node over update + + * DROP TABLE now drops shards as the current user instead of the superuser + + * Adds specialised error codes for connection failures + + * Improves error messages on connection failure + + * Fixes issue which prevented multiple citus_table_size calls per query + + * Tests are updated to use create_distributed_table + + -- Burak Velioglu Tue, 15 May 2018 13:01:17 +0000 + +citus (7.3.0.citus-1) stable; urgency=low + + * Adds support for non-colocated joins between subqueries + + * Adds support for window functions that can be pushed down to worker + + * Adds support for modifying CTEs + + * Adds recursive plan for WHERE clause subqueries with recurring FROM clause + + * Adds support for bool_ and bit_ aggregates + + * Adds support for Postgres jsonb and json aggregation functions + + * Adds support for respecting enable_hashagg in the master plan + + * Performance improvements to reduce distributed planning time + + * Fixes a bug on planner when aggregate is used in ORDER BY + + * Fixes a bug on planner when DISTINCT (ON) clause is used with GROUP BY + + * Fixes a planner bug with distinct and aggregate clauses + + * Fixes a bug that opened new connections on each table size function call + + * Fixes a bug canceling backends not involved in distributed deadlocks + + * Fixes count distinct bug on column expressions when used with subqueries + + * Improves error handling on worker node failures + + * Improves error messages for INSERT queries that have subqueries + + -- Burak Velioglu Thu, 15 Mar 2018 14:16:10 +0000 + +citus (7.2.1.citus-1) stable; urgency=low + + * Fixes count distinct bug on column expressions when used with 
subqueries + + * Adds support for respecting enable_hashagg in the master plan + + * Fixes a bug canceling backends not involved in distributed deadlocks + + -- Burak Velioglu Tue, 06 Feb 2018 14:46:07 +0000 + +citus (7.2.0.citus-1) stable; urgency=low + + * Adds support for CTEs + + * Adds support for subqueries that require merge step + + * Adds support for set operations (UNION, INTERSECT, ...) + + * Adds support for 2PC auto-recovery + + * Adds support for querying local tables in CTEs and subqueries + + * Adds support for more SQL coverage in subqueries for reference tables + + * Adds support for count(distinct) in queries with a subquery + + * Adds support for non-equijoins when there is already an equijoin + + * Adds support for real-time executor to run in transaction blocks + + * Adds infrastructure for storing intermediate distributed query results + + * Adds a new GUC named enable_repartition_joins for auto executor switch + + * Adds support for limiting the intermediate result size + + * Improves support for queries with unions containing filters + + * Improves support for queries with unions containing joins + + * Improves support for subqueries in the WHERE clause + + * Increases COPY throughput + + * Enables pushing down queries containing only recurring tuples and GROUP BY + + * Load-balance queries that read from 0 shards + + * Improves support for using functions in subqueries + + * Fixes a bug causing real-time executor to crash during cancellation + + * Fixes a bug causing real-time executor to get stuck on cancellation + + * Fixes a bug that could block modification queries unnecessarily + + * Fixes a bug that could cause assigning wrong IDs to transactions + + * Fixes a bug that could cause an assert failure with ANALYZE statements + + * Fixes a bug that would push down wrong set operations in subqueries + + * Fixes a bug that could cause a deadlock in create_distributed_table + + * Fixes a bug that could confuse users about ANALYZE usage + + * Fixes a bug causing false positive distributed deadlock detections + + * Relaxes the locking for DDL commands on partitioned tables + + * Relaxes the locking on COPY with replication + + * Logs more remote commands when citus.log_remote_commands is set + + -- Burak Velioglu Tue, 16 Jan 2018 14:34:20 +0000 + +citus (6.2.5.citus-1) stable; urgency=low + + * Fixes a bug that could crash the coordinator while reporting a remote error + + -- Burak Velioglu Thu, 11 Jan 2018 11:40:28 +0000 + +citus (7.1.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause assigning wrong IDs to transactions + + * Increases COPY throughput + + -- Burak Velioglu Fri, 05 Jan 2018 09:00:07 +0000 + +citus (7.1.1.citus-1) stable; urgency=low + + * Fixes a bug preventing pushing down subqueries with reference tables + + * Fixes a bug that could create false positive distributed deadlocks + + * Fixes a bug that could prevent running concurrent COPY and multi-shard DDL + + * Fixes a bug that could mislead users about ANALYZE queries + + -- Burak Velioglu Tue, 05 Dec 2017 09:00:07 +0000 + +citus (7.1.0.citus-1) stable; urgency=low + + * Adds support for native queries with multi shard UPDATE/DELETE queries + + * Expands reference table support in subquery pushdown + + * Adds window function support for subqueries and INSERT ...
SELECT queries + + * Adds support for COUNT(DISTINCT) [ON] queries on non-partition columns + + * Adds support for DISTINCT [ON] queries on non-partition columns + + * Introduces basic usage statistic collector + + * Adds support for setting replica identity while creating distributed tables + + * Adds support for ALTER TABLE ... REPLICA IDENTITY queries + + * Adds pushdown support for LIMIT and HAVING grouped by partition key + + * Adds support for INSERT ... SELECT queries via worker nodes on MX clusters + + * Adds support for adding primary key using already defined index + + * Adds replication parameter to shard copy functions + + * Changes shard_name UDF to omit public schema name + + * Adds master_move_node UDF to make changes on nodename/nodeport easier + + * Fixes a bug that could cause casting error with INSERT ... SELECT queries + + * Fixes a bug that could prevent upgrading servers from Citus 6.1 + + * Fixes a bug that could prevent attaching partitions to a table in schema + + * Fixes a bug preventing adding nodes to clusters with reference tables + + * Fixes a bug that could cause a crash with INSERT ... SELECT queries + + * Fixes a bug that could prevent creating a partitioned table on Cloud + + * Implements various performance improvements + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Velioglu Wed, 15 Nov 2017 09:00:07 +0000 + +citus (7.0.3.citus-1) stable; urgency=low + + * Fixes several bugs that could cause crash + + * Fixes a bug that could cause deadlock while creating reference tables + + * Fixes a bug that could cause false-positives in deadlock detection + + * Fixes a bug that could cause 2PC recovery not to work from MX workers + + * Fixes a bug that could cause cache incoherency + + * Fixes a bug that could cause maintenance daemon to skip cache invalidations + + * Improves performance of transaction recovery by using correct index + + -- Burak Yucesoy Mon, 16 Oct 2017 11:52:07 +0000 + +citus (7.0.2.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 22:29:01 +0000 + +citus (6.2.4.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 21:31:35 +0000 + +citus (6.1.3.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 20:31:35 +0000 + +citus (7.0.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause memory leaks in INSERT ... SELECT queries + + * Fixes a bug that could cause incorrect execution of prepared statements + + * Fixes a bug that could cause excessive memory usage during COPY + + * Incorporates latest changes from core PostgreSQL code + + -- Burak Yucesoy Tue, 12 Sep 2017 17:53:50 +0000 + +citus (7.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 10 + + * Drops support for PostgreSQL 9.5 + + * Adds support for multi-row INSERT + + * Adds support for router UPDATE and DELETE queries with subqueries + + * Adds infrastructure for distributed deadlock detection + + * Deprecates enable_deadlock_prevention flag + + * Adds support for partitioned tables + + * Adds support for creating UNLOGGED tables + + * Adds support for SAVEPOINT + + * Adds UDF citus_create_restore_point for taking distributed snapshots + + * Adds support for evaluating non-pushable INSERT ...
SELECT queries + + * Adds support for subquery pushdown on reference tables + + * Adds shard pruning support for IN and ANY + + * Adds support for UPDATE and DELETE commands that prune down to 0 shard + + * Enhances transaction support by relaxing some transaction restrictions + + * Fixes a bug causing crash if distributed table has no shards + + * Fixes a bug causing crash when removing inactive node + + * Fixes a bug causing failure during COPY on tables with dropped columns + + * Fixes a bug causing failure during DROP EXTENSION + + * Fixes a bug preventing executing VACUUM and INSERT concurrently + + * Fixes a bug in prepared INSERT statements containing an implicit cast + + * Fixes several issues related to statement cancellations and connections + + * Fixes several 2PC related issues + + * Removes an unnecessary dependency causing warning messages in pg_dump + + * Adds internal infrastructure for follower clusters + + * Adds internal infrastructure for progress tracking + + * Implements various performance improvements + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Mon, 28 Aug 2017 12:27:53 +0000 + +citus (6.2.3.citus-1) stable; urgency=low + + * Fixes a crash during execution of local CREATE INDEX CONCURRENTLY + + * Fixes a bug preventing usage of quoted column names in COPY + + * Fixes a bug in prepared INSERTs with implicit cast in partition column + + * Relaxes locks in VACUUM to ensure concurrent execution with INSERT + + -- Burak Yucesoy Thu, 13 Jul 2017 11:27:17 +0000 + +citus (6.2.2.citus-1) stable; urgency=low + + * Fixes a common cause of deadlocks when repairing tables with foreign keys + + -- Burak Velioglu Wed, 07 Jun 2017 09:42:17 +0000 + +citus (6.1.2.citus-1) stable; urgency=low + + * Fixes a common cause of deadlocks when repairing tables with foreign keys + + -- Jason Petersen Wed, 31 May 2017 16:14:11 +0000 + +citus (6.2.1.citus-1) stable; urgency=low + + * Relaxes version-check logic to avoid breaking non-distributed commands + + -- Jason Petersen Wed, 24 May 2017 22:36:07 +0000 + +citus (6.2.0.citus-1) stable; urgency=low + + * Increases SQL subquery coverage by pushing down more kinds of queries + + * Adds CustomScan API support to allow read-only transactions + + * Adds support for CREATE/DROP INDEX CONCURRENTLY + + * Adds support for ALTER TABLE ... ADD CONSTRAINT + + * Adds support for ALTER TABLE ... 
RENAME COLUMN + + * Adds support for DISABLE/ENABLE TRIGGER ALL + + * Adds support for expressions in the partition column in INSERTs + + * Adds support for query parameters in combination with function evaluation + + * Adds support for creating distributed tables from non-empty local tables + + * Adds UDFs to get size of distributed tables + + * Adds UDFs to add a new node without replicating reference tables + + * Adds checks to prevent running Citus binaries with wrong metadata tables + + * Improves shard pruning performance for range queries + + * Improves planner performance for joins involving co-located tables + + * Improves shard copy performance by creating indexes after copy + + * Improves task-tracker performance by batching several status checks + + * Enables router planner for queries on range partitioned table + + * Changes TRUNCATE to drop local data only if enable_ddl_propagation is off + + * Starts to execute DDL on coordinator before workers + + * Fixes a bug causing incorrectly reading invalidated cache + + * Fixes a bug related to creation of schemas in workers with incorrect owner + + * Fixes a bug related to concurrent run of shard drop functions + + * Fixes a bug related to EXPLAIN ANALYZE with DML queries + + * Fixes a bug related to SQL functions in FROM clause + + * Adds a GUC variable to report cross shard queries + + * Fixes a bug related to partition columns without native hash function + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Tue, 16 May 2017 16:05:22 +0000 + +citus (6.1.1.citus-1) stable; urgency=low + + * Fixes a crash caused by router executor use after connection timeouts + + * Fixes a crash caused by relation cache invalidation during COPY + + * Fixes bug related to DDL use within PL/pgSQL functions + + * Fixes a COPY bug related to types lacking binary output functions + + * Fixes a bug related to modifications with parameterized partition values + + * Fixes improper value interpolation in worker sequence generation + + * Guards shard pruning logic against zero-shard tables + + * Fixes possible NULL pointer dereference and buffer underflow, via PVS-Studio + + * Fixes a INSERT...SELECT bug that could push down non-partition column JOINs + + -- Metin Doslu Fri, 5 May 2017 17:42:00 +0000 + +citus (6.1.0.citus-1) stable; urgency=low + + * Implements reference tables, transactionally replicated to all nodes + + * Adds upgrade_to_reference_table UDF to upgrade pre-6.1 reference tables + + * Expands prepared statement support to nearly all statements + + * Adds support for creating VIEWs which reference distributed tables + + * Adds targeted VACUUM/ANALYZE support + + * Adds support for the FILTER clause in aggregate expressions + + * Adds support for function evaluation within INSERT INTO ... 
SELECT + + * Adds support for creating foreign key constraints with ALTER TABLE + + * Adds logic to choose router planner for all queries it supports + + * Enhances create_distributed_table with parameter for explicit colocation + + * Adds generally useful utility UDFs previously available as "Citus Tools" + + * Adds user-facing UDFs for locking shard resources and metadata + + * Refactors connection and transaction management for more consistency + + * Enhances COPY with fully transactional semantics + + * Improves support for cancellation for a number of queries and commands + + * Adds column_to_column_name UDF to help users understand partkey values + + * Adds master_disable_node UDF for temporarily disabling nodes + + * Adds proper MX ("masterless") metadata propagation logic + + * Adds start_metadata_sync_to_node UDF to propagate metadata changes to nodes + + * Enhances SERIAL compatibility with MX tables + + * Adds a node_connection_timeout parameter to set node connection timeouts + + * Adds enable_deadlock_prevention setting to permit multi-node transactions + + * Adds a replication_model setting to specify replication of new tables + + * Changes the shard_replication_factor setting's default value to one + + * Adds code to automatically set max_prepared_transactions if not configured + + * Accelerates lookup of colocated shard placements + + * Fixes a bug affecting INSERT INTO ... SELECT queries using constant values + + * Fixes a bug by ensuring COPY does not mark placements inactive + + * Fixes a bug affecting reads from pg_dist_shard_placement table + + * Fixes a crash triggered by creating a foreign key without a column + + * Fixes a crash related to accessing catalog tables after aborted transaction + + * Fixes a bug affecting JOIN queries requiring repartitions + + * Fixes a bug affecting node insertions to pg_dist_node table + + * Fixes a crash triggered by queries with modifying common table expressions + + * Fixes a bug affecting workloads with concurrent shard appends and deletions + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Thu, 9 Feb 2017 16:17:41 +0000 + +citus (6.0.1.citus-3) stable; urgency=low + + * First build using new versioning practices + + -- Jason Petersen Wed, 8 Feb 2017 23:19:46 +0000 + +citus (6.0.1.citus-2) stable; urgency=low + + * Transitional package to guide users to new package name + + -- Jason Petersen Mon, 6 Feb 2017 16:33:44 +0000 + +citus (6.0.1.citus-1) stable; urgency=low + + * Fixes a bug causing failures during pg_upgrade + + * Fixes a bug preventing DML queries during colocated table creation + + * Fixes a bug that caused NULL parameters to be incorrectly passed as text + + -- Burak Yucesoy Wed, 30 Nov 2016 15:27:38 +0000 + +citus (6.0.0.citus-1) stable; urgency=low + + * Adds compatibility with PostgreSQL 9.6, now the recommended version + + * Removes the pg_worker_list.conf file in favor of a pg_dist_node table + + * Adds master_add_node and master_remove_node UDFs to manage membership + + * Removes the \stage command and corresponding csql binary in favor of COPY + + * Removes copy_to_distributed_table in favor of first-class COPY support + + * Adds support for multiple DDL statements within a transaction + + * Adds support for certain foreign key constraints + + * Adds support for parallel INSERT INTO ...
SELECT against colocated tables + + * Adds support for the TRUNCATE command + + * Adds support for HAVING clauses in SELECT queries + + * Adds support for EXCLUDE constraints which include the partition column + + * Adds support for system columns in queries (tableoid, ctid, etc.) + + * Adds support for relation name extension within INDEX definitions + + * Adds support for no-op UPDATEs of the partition column + + * Adds several general-purpose utility UDFs to aid in Citus maintenance + + * Adds master_expire_table_cache UDF to forcibly expire cached shards + + * Parallelizes the processing of DDL commands which affect distributed tables + + * Adds support for repartition jobs using composite or custom types + + * Enhances object name extension to handle long names and large shard counts + + * Parallelizes the master_modify_multiple_shards UDF + + * Changes distributed table creation to error if target table is not empty + + * Changes the pg_dist_shard.logicalrelid column from an oid to regclass + + * Adds a placementid column to pg_dist_shard_placement, replacing Oid use + + * Removes the pg_dist_shard.shardalias distribution metadata column + + * Adds pg_dist_partition.repmodel to track tables using streaming replication + + * Adds internal infrastructure to take snapshots of distribution metadata + + * Addresses the need to invalidate prepared statements on metadata changes + + * Adds a mark_tables_colocated UDF for denoting pre-6.0 manual colocation + + * Fixes a bug affecting prepared statement execution within PL/pgSQL + + * Fixes a bug affecting COPY commands using composite types + + * Fixes a bug that could cause crashes during EXPLAIN EXECUTE + + * Separates worker and master job temporary folders + + * Eliminates race condition between distributed modification and repair + + * Relaxes the requirement that shard repairs also repair colocated shards + + * Implements internal functions to track which tables' shards are colocated + + * Adds pg_dist_partition.colocationid to track colocation group membership + + * Extends shard copy and move operations to respect colocation settings + + * Adds pg_dist_local_group to prepare for future MX-related changes + + * Adds create_distributed_table to easily create shards and infer colocation + + -- Jason Petersen Tue, 8 Nov 2016 19:45:45 +0000 + +citus (5.2.2.citus-1) stable; urgency=low + + * Adds support for IF NOT EXISTS clause of CREATE INDEX command + + * Adds support for RETURN QUERY and FOR ... 
IN PL/pgSQL features + + * Extends the router planner to handle more queries + + * Changes COUNT of zero-row sets to return 0 rather than an empty result + + * Reduces the minimum permitted task_tracker_delay to a single millisecond + + * Fixes a bug that caused crashes during joins with a WHERE false clause + + * Fixes a bug triggered by unique violation errors raised in long txns + + * Fixes a bug resulting in multiple registration of transaction callbacks + + * Fixes a bug which could result in stale reads of distribution metadata + + * Fixes a bug preventing distributed modifications in some PL/pgSQL functions + + * Fixes some code paths that could hypothetically read uninitialized memory + + * Lowers log level of "waiting for activity" messages + + -- Jason Petersen Tue, 8 Nov 2016 18:43:37 +0000 + +citus (5.2.1.citus-1) stable; urgency=low + + * Fixes subquery pushdown to properly extract outer join qualifiers + + * Addresses possible memory leak during multi-shard transactions + + -- Jason Petersen Tue, 6 Sep 2016 20:47:15 +0000 + +citus (5.2.0.citus-1) stable; urgency=low + + * Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required + + * Adds schema support for tables, named objects (types, operators, etc.) + + * Evaluates non-immutable functions on master in all modification commands + + * Adds support for SERIAL types in non-partition columns + + * Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands + + * Adds support for multi-statement transactions using a fixed set of nodes + + * Full SQL support for SELECT queries which can be executed on single worker + + * Adds option to perform DDL changes using prepared transactions (2PC) + + * Adds an enable_ddl_propagation parameter to control DDL propagation + + * Accelerates shard pruning during merges + + * Adds master_modify_multiple_shards UDF to modify many shards at once + + * Adds COPY support for arrays of user-defined types + + * Now supports parameterized prepared statements for certain use cases + + * Extends LIMIT/OFFSET support to all executor types + + * Constraint violations now fail fast rather than hitting all placements + + * Makes master_create_empty_shard aware of shard placement policy + + * Reduces unnecessary sleep during queries processed by real-time executor + + * Improves task tracker executor's task cleanup logic + + * Relaxes restrictions on cancellation of DDL commands + + * Removes ONLY keyword from worker SELECT queries + + * Error message improvements and standardization + + * Moves master_update_shard_statistics function to pg_catalog schema + + * Fixes a bug where hash-partitioned anti-joins could return bad results + + * Now sets storage type correctly for foreign table-backed shards + + * Fixes master_update_shard_statistics issue with hash-partitioned tables + + * Fixes an issue related to extending table names that require escaping + + * Reduces risk of row counter overflows during modifications + + * Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries + + * Fixes crashes related to partition columns with high attribute numbers + + * Fixes certain subquery and join crashes + + * Detects flex for build even if PostgreSQL was built without it + + * Fixes assert-enabled crash when all_modifications_commutative is true + + -- Jason Petersen Wed, 17 Aug 2016 10:23:21 +0000 + +citus (5.2.0~rc.1-1) testing; urgency=low + + * Release candidate for 5.2. 
+ + -- Jason Petersen Mon, 01 Aug 2016 17:01:05 +0000 + +citus (5.1.1-1) stable; urgency=low + + * Adds complex count distinct expression support in repartitioned subqueries + + * Improves task tracker job cleanup logic, addressing a memory leak + + * Fixes bug that generated incorrect results for LEFT JOIN queries + + * Improves compatibility with Debian's reproducible builds project + + * Fixes build issues on FreeBSD platforms + + -- Jason Petersen Fri, 17 Jun 2016 16:20:15 +0000 + +citus (5.1.0-1) stable; urgency=low + + * Adds distributed COPY to rapidly populate distributed tables + + * Adds support for using EXPLAIN on distributed queries + + * Recognizes and fast-paths single-shard SELECT statements automatically + + * Increases INSERT throughput via shard pruning optimizations + + * Improves planner performance for joins involving tables with many shards + + * Adds ability to pass columns as arguments to function calls in UPDATEs + + * Introduces transaction manager for use by multi-shard commands + + * Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables + + * Adds support for some UNIQUE indexes on hash- or range-partitioned tables + + * Deprecates \stage in favor of using COPY for append-partition tables + + * Deprecates copy_to_distributed_table in favor of first-class COPY support + + * Fixes build problems when using non-packaged PostgreSQL installs + + * Fixes bug that sometimes skipped pruning when partitioned by VARCHAR column + + * Fixes bug impeding use of user functions in repartitioned subqueries + + * Fixes bug involving queries with equality comparisons of boolean types + + * Fixes crash that prevented use alongside pg_stat_statements + + * Fixes crash arising from SELECT queries that lack a target list + + * Improves warning and error messages + + -- Jason Petersen Tue, 17 May 2016 16:55:02 +0000 + +citus (5.1.0~rc.2-1) testing; urgency=low + + * Fix EXPLAIN output when FORMAT JSON in use + + -- Jason Petersen Mon, 16 May 2016 11:08:09 +0000 + +citus (5.1.0~rc.1-1) testing; urgency=low + + * Release candidate for 5.1. 
+ + -- Jason Petersen Wed, 04 May 2016 19:26:23 +0000 + +citus (5.0.1-1) stable; urgency=low + + * Fixes issues on 32-bit systems + + -- Jason Petersen Fri, 15 Apr 2016 19:17:35 +0000 + +citus (5.0.0-1) stable; urgency=low + + * Initial release + + -- Jason Petersen Thu, 24 Mar 2016 10:12:52 -0400 diff --git a/packaging_automation/tests/files/get_postgres_versions_tests/pg_exclude.yml b/packaging_automation/tests/files/get_postgres_versions_tests/pg_exclude.yml index 0839766b..16636e2c 100644 --- a/packaging_automation/tests/files/get_postgres_versions_tests/pg_exclude.yml +++ b/packaging_automation/tests/files/get_postgres_versions_tests/pg_exclude.yml @@ -1,5 +1,5 @@ -exclude: - nightly: - ol/7: [15] - release: - all: [15] +exclude: + nightly: + ol/7: [15] + release: + all: [15] diff --git a/packaging_automation/tests/files/get_postgres_versions_tests/pkgvars b/packaging_automation/tests/files/get_postgres_versions_tests/pkgvars index 152f1689..d9ea5098 100644 --- a/packaging_automation/tests/files/get_postgres_versions_tests/pkgvars +++ b/packaging_automation/tests/files/get_postgres_versions_tests/pkgvars @@ -1,4 +1,4 @@ -pkgname=citus -pkgdesc='Citus (Open-Source)' -pkglatest=11.0.5 -versioning=fancy +pkgname=citus +pkgdesc='Citus (Open-Source)' +pkglatest=11.0.5 +versioning=fancy diff --git a/packaging_automation/tests/files/get_postgres_versions_tests/postgres-matrix.yml b/packaging_automation/tests/files/get_postgres_versions_tests/postgres-matrix.yml index 8fadcfa6..cb75825b 100644 --- a/packaging_automation/tests/files/get_postgres_versions_tests/postgres-matrix.yml +++ b/packaging_automation/tests/files/get_postgres_versions_tests/postgres-matrix.yml @@ -1,22 +1,22 @@ -### THIS FILE IS ONLY FOR UNIT TESTS. ACTUAL POSTGRES MATRIX FILES ARE IN PACKAGING PROJECT BRANCHES ### -name: Postgres Version Matrix -project_name: citus # alternatives: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise -# There is one configuration like this for each project in packaging repo -# i.e. in all-citus, all-enterprise, all-pgautofailover, all-pgautofailover-enterprise, pgxn-citus etc. -version_matrix: - - 8.0: - postgres_versions: [10, 11] - - 9.0: - postgres_versions: [11, 12] - - 9.5: - postgres_versions: [11, 12, 13] - - 10.1: - postgres_versions: [12, 13] - - 10.2: - postgres_versions: [12, 13, 14] - - 11.0: - postgres_versions: [13, 14] - - 11.1: - postgres_versions: [ 13, 14, 15 ] - - 12.0: - postgres_versions: [ 14, 15 ] +### THIS FILE IS ONLY FOR UNIT TESTS. ACTUAL POSTGRES MATRIX FILES ARE IN PACKAGING PROJECT BRANCHES ### +name: Postgres Version Matrix +project_name: citus # alternatives: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise +# There is one configuration like this for each project in packaging repo +# i.e. in all-citus, all-enterprise, all-pgautofailover, all-pgautofailover-enterprise, pgxn-citus etc. 
+version_matrix: + - 8.0: + postgres_versions: [10, 11] + - 9.0: + postgres_versions: [11, 12] + - 9.5: + postgres_versions: [11, 12, 13] + - 10.1: + postgres_versions: [12, 13] + - 10.2: + postgres_versions: [12, 13, 14] + - 11.0: + postgres_versions: [13, 14] + - 11.1: + postgres_versions: [ 13, 14, 15 ] + - 12.0: + postgres_versions: [ 14, 15 ] diff --git a/packaging_automation/tests/files/gpg/packaging.gpg b/packaging_automation/tests/files/gpg/packaging.gpg index 39617331..bec537df 100644 --- a/packaging_automation/tests/files/gpg/packaging.gpg +++ b/packaging_automation/tests/files/gpg/packaging.gpg @@ -1,14 +1,14 @@ -%echo Generating a basic OpenPGP key -Key-Type: RSA -Key-Length: 2048 -Subkey-Type: RSA -Subkey-Length: 2048 -Name-Real: Citus Data -Name-Email: packaging@citusdata.com -Expire-Date: 0 -%no-ask-passphrase -%no-protection - -# Do a commit here, so that we can later print "done" :-) -%commit +%echo Generating a basic OpenPGP key +Key-Type: RSA +Key-Length: 2048 +Subkey-Type: RSA +Subkey-Length: 2048 +Name-Real: Citus Data +Name-Email: packaging@citusdata.com +Expire-Date: 0 +%no-ask-passphrase +%no-protection + +# Do a commit here, so that we can later print "done" :-) +%commit %echo done \ No newline at end of file diff --git a/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg b/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg index 07be70eb..b7e954fa 100644 --- a/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg +++ b/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg @@ -1,13 +1,13 @@ -%echo Generating a basic OpenPGP key -Key-Type: RSA -Key-Length: 2048 -Subkey-Type: RSA -Subkey-Length: 2048 -Name-Real: Citus Data -Name-Email: packaging@citusdata.com -Expire-Date: 0 -Passphrase: Citus123 - -# Do a commit here, so that we can later print "done" :-) -%commit +%echo Generating a basic OpenPGP key +Key-Type: RSA +Key-Length: 2048 +Subkey-Type: RSA +Subkey-Length: 2048 +Name-Real: Citus Data +Name-Email: packaging@citusdata.com +Expire-Date: 0 +Passphrase: Citus123 + +# Do a commit here, so that we can later print "done" :-) +%commit %echo done \ No newline at end of file diff --git a/packaging_automation/tests/files/packaging_warning/packaging_ignore.yml b/packaging_automation/tests/files/packaging_warning/packaging_ignore.yml index d5b576e8..177cb3c1 100644 --- a/packaging_automation/tests/files/packaging_warning/packaging_ignore.yml +++ b/packaging_automation/tests/files/packaging_warning/packaging_ignore.yml @@ -1,15 +1,15 @@ -base: - - "^/.* warning: ignoring old recipe for target 'check'" - - "sh: warning: setlocale: LC_ALL: cannot change locale \\(C.utf8\\): No such file or directory" - - "dpkg-buildpackage: warning: using a gain-root-command while being root" - - "/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory" - - "warning: line \\d+: multiple %files for package *" - - "configure: WARNING: unrecognized options: --disable-dependency-tracking" - - "WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags." 
- - "gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run" -debian: - - ".*: W: invalid-license Commercial" - - ".*: W: no-documentation" -rpm: - - ".*: W: invalid-license Commercial" - - ".*: W: no-documentation" +base: + - "^/.* warning: ignoring old recipe for target 'check'" + - "sh: warning: setlocale: LC_ALL: cannot change locale \\(C.utf8\\): No such file or directory" + - "dpkg-buildpackage: warning: using a gain-root-command while being root" + - "/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory" + - "warning: line \\d+: multiple %files for package *" + - "configure: WARNING: unrecognized options: --disable-dependency-tracking" + - "WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags." + - "gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run" +debian: + - ".*: W: invalid-license Commercial" + - ".*: W: no-documentation" +rpm: + - ".*: W: invalid-license Commercial" + - ".*: W: no-documentation" diff --git a/packaging_automation/tests/files/packaging_warning/packaging_ignore_without_rpm_rules.yml b/packaging_automation/tests/files/packaging_warning/packaging_ignore_without_rpm_rules.yml index 8157f317..7a91cc6e 100644 --- a/packaging_automation/tests/files/packaging_warning/packaging_ignore_without_rpm_rules.yml +++ b/packaging_automation/tests/files/packaging_warning/packaging_ignore_without_rpm_rules.yml @@ -1,12 +1,12 @@ -base: - - "^/.* warning: ignoring old recipe for target 'check'" - - "sh: warning: setlocale: LC_ALL: cannot change locale \\(C.utf8\\): No such file or directory" - - "dpkg-buildpackage: warning: using a gain-root-command while being root" - - "/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory" - - "warning: line \\d+: multiple %files for package *" - - "configure: WARNING: unrecognized options: --disable-dependency-tracking" - - "WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags." - - "gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run" -debian: - - ".*: W: invalid-license Commercial" - - ".*: W: no-documentation" +base: + - "^/.* warning: ignoring old recipe for target 'check'" + - "sh: warning: setlocale: LC_ALL: cannot change locale \\(C.utf8\\): No such file or directory" + - "dpkg-buildpackage: warning: using a gain-root-command while being root" + - "/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory" + - "warning: line \\d+: multiple %files for package *" + - "configure: WARNING: unrecognized options: --disable-dependency-tracking" + - "WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags." 
+ - "gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run" +debian: + - ".*: W: invalid-license Commercial" + - ".*: W: no-documentation" diff --git a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb.txt b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb.txt index f23c935b..b2836f78 100644 --- a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb.txt +++ b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb.txt @@ -1,165 +1,165 @@ -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -Warning: Unhandled -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ exit 0 -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf 
a+rX,u+w,g-w,o-w . -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -warning: line 345: multiple %files for package 'citus-enterprise100_11' -/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' -sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory -dpkg-buildpackage: warning: using a gain-root-command while being root -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -configure: WARNING: unrecognized options: --disable-dependency-tracking -WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
-gpg: directory `/root/.gnupg' created -gpg: new configuration file `/root/.gnupg/gpg.conf' created -gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run -gpg: keyring `/root/.gnupg/secring.gpg' created -gpg: keyring `/root/.gnupg/pubring.gpg' created -gpg: key 3F95D6C6: secret key imported -gpg: /root/.gnupg/trustdb.gpg: trustdb created -gpg: key 3F95D6C6: public key "Citus Data " imported -gpg: Total number processed: 1 -gpg: imported: 1 (RSA: 1) -gpg: secret keys read: 1 -gpg: secret keys imported: 1 -Now running lintian --profile debian --allow-root citus-enterprise_10.1.2.citus-1_amd64.changes ... -pg_cron_10-debugsource.x86_64: W: no-documentation -pg_cron_11-debugsource.x86_64: W: no-documentation -pg_cron_12-debugsource.x86_64: W: no-documentation -pg_cron_13-debugsource.x86_64: W: no-documentation -citus-enterprise100_11.x86_64: W: invalid-license Commercial -citus-enterprise100_11.x86_64: W: invalid-date-format -citus-enterprise100_11.x86_64: E: zero-length /usr/pgsql-/usr/lib/share/extension/ - - -Removing centos-8/ -Removing citus_package.log -Removing tools/.gitignore -Removing tools/CHANGELOG.md -Removing tools/HomebrewFormula/ -Removing tools/Makefile -Removing tools/README.md -Removing tools/automated_packaging/ -Removing tools/citus_dev/ -Removing tools/dashboard/ -Removing tools/packaging/Makefile -Removing tools/packaging/README.md -Removing tools/packaging/citus_package -Removing tools/travis/ -Removing tools/uncrustify/Makefile -Removing tools/uncrustify/README.md -Removing tools/uncrustify/citus-style.cfg -Removing tools/uncrustify/citus_indent -Removing tools/valgrind/ -The command "build_new_release" exited with 0. -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because a custom condition was not met +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory +Warning: Unhandled ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ exit 0 +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +warning: line 345: multiple %files for package 'citus-enterprise100_11' +/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' +sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory +dpkg-buildpackage: warning: using a gain-root-command while being root +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory +configure: WARNING: unrecognized options: --disable-dependency-tracking +WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
+gpg: directory `/root/.gnupg' created +gpg: new configuration file `/root/.gnupg/gpg.conf' created +gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run +gpg: keyring `/root/.gnupg/secring.gpg' created +gpg: keyring `/root/.gnupg/pubring.gpg' created +gpg: key 3F95D6C6: secret key imported +gpg: /root/.gnupg/trustdb.gpg: trustdb created +gpg: key 3F95D6C6: public key "Citus Data " imported +gpg: Total number processed: 1 +gpg: imported: 1 (RSA: 1) +gpg: secret keys read: 1 +gpg: secret keys imported: 1 +Now running lintian --profile debian --allow-root citus-enterprise_10.1.2.citus-1_amd64.changes ... +pg_cron_10-debugsource.x86_64: W: no-documentation +pg_cron_11-debugsource.x86_64: W: no-documentation +pg_cron_12-debugsource.x86_64: W: no-documentation +pg_cron_13-debugsource.x86_64: W: no-documentation +citus-enterprise100_11.x86_64: W: invalid-license Commercial +citus-enterprise100_11.x86_64: W: invalid-date-format +citus-enterprise100_11.x86_64: E: zero-length /usr/pgsql-/usr/lib/share/extension/ + + +Removing centos-8/ +Removing citus_package.log +Removing tools/.gitignore +Removing tools/CHANGELOG.md +Removing tools/HomebrewFormula/ +Removing tools/Makefile +Removing tools/README.md +Removing tools/automated_packaging/ +Removing tools/citus_dev/ +Removing tools/dashboard/ +Removing tools/packaging/Makefile +Removing tools/packaging/README.md +Removing tools/packaging/citus_package +Removing tools/travis/ +Removing tools/uncrustify/Makefile +Removing tools/uncrustify/README.md +Removing tools/uncrustify/citus-style.cfg +Removing tools/uncrustify/citus_indent +Removing tools/valgrind/ +The command "build_new_release" exited with 0. +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because a custom condition was not met Done. Your build exited with 0. \ No newline at end of file diff --git a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb_only_base.txt b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb_only_base.txt index 638ef70e..d7a144e3 100644 --- a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb_only_base.txt +++ b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_deb_only_base.txt @@ -1,156 +1,156 @@ -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
-+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -Warning: Unhandled -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ exit 0 -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
-+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -warning: line 345: multiple %files for package 'citus-enterprise100_11' -/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' -sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory -dpkg-buildpackage: warning: using a gain-root-command while being root -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -configure: WARNING: unrecognized options: --disable-dependency-tracking -WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
-gpg: directory `/root/.gnupg' created -gpg: new configuration file `/root/.gnupg/gpg.conf' created -gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run -gpg: keyring `/root/.gnupg/secring.gpg' created -gpg: keyring `/root/.gnupg/pubring.gpg' created -gpg: key 3F95D6C6: secret key imported -gpg: /root/.gnupg/trustdb.gpg: trustdb created -gpg: key 3F95D6C6: public key "Citus Data " imported -gpg: Total number processed: 1 -gpg: imported: 1 (RSA: 1) -gpg: secret keys read: 1 -gpg: secret keys imported: 1 - -Removing centos-8/ -Removing citus_package.log -Removing tools/.gitignore -Removing tools/CHANGELOG.md -Removing tools/HomebrewFormula/ -Removing tools/Makefile -Removing tools/README.md -Removing tools/automated_packaging/ -Removing tools/citus_dev/ -Removing tools/dashboard/ -Removing tools/packaging/Makefile -Removing tools/packaging/README.md -Removing tools/packaging/citus_package -Removing tools/travis/ -Removing tools/uncrustify/Makefile -Removing tools/uncrustify/README.md -Removing tools/uncrustify/citus-style.cfg -Removing tools/uncrustify/citus_indent -Removing tools/valgrind/ -The command "build_new_release" exited with 0. -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because a custom condition was not met +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory +Warning: Unhandled ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ exit 0 +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +warning: line 345: multiple %files for package 'citus-enterprise100_11' +/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' +sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory +dpkg-buildpackage: warning: using a gain-root-command while being root +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory +configure: WARNING: unrecognized options: --disable-dependency-tracking +WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
+gpg: directory `/root/.gnupg' created +gpg: new configuration file `/root/.gnupg/gpg.conf' created +gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run +gpg: keyring `/root/.gnupg/secring.gpg' created +gpg: keyring `/root/.gnupg/pubring.gpg' created +gpg: key 3F95D6C6: secret key imported +gpg: /root/.gnupg/trustdb.gpg: trustdb created +gpg: key 3F95D6C6: public key "Citus Data " imported +gpg: Total number processed: 1 +gpg: imported: 1 (RSA: 1) +gpg: secret keys read: 1 +gpg: secret keys imported: 1 + +Removing centos-8/ +Removing citus_package.log +Removing tools/.gitignore +Removing tools/CHANGELOG.md +Removing tools/HomebrewFormula/ +Removing tools/Makefile +Removing tools/README.md +Removing tools/automated_packaging/ +Removing tools/citus_dev/ +Removing tools/dashboard/ +Removing tools/packaging/Makefile +Removing tools/packaging/README.md +Removing tools/packaging/citus_package +Removing tools/travis/ +Removing tools/uncrustify/Makefile +Removing tools/uncrustify/README.md +Removing tools/uncrustify/citus-style.cfg +Removing tools/uncrustify/citus_indent +Removing tools/valgrind/ +The command "build_new_release" exited with 0. +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because a custom condition was not met Done. Your build exited with 0. \ No newline at end of file diff --git a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm.txt b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm.txt index 8f63fc7b..31f3b98a 100644 --- a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm.txt +++ b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm.txt @@ -1,163 +1,163 @@ -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
-+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ exit 0 -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
-+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -warning: line 345: multiple %files for package 'citus-enterprise100_11' -/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' -sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory -dpkg-buildpackage: warning: using a gain-root-command while being root -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -configure: WARNING: unrecognized options: --disable-dependency-tracking -WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
-gpg: directory `/root/.gnupg' created -gpg: new configuration file `/root/.gnupg/gpg.conf' created -gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run -gpg: keyring `/root/.gnupg/secring.gpg' created -gpg: keyring `/root/.gnupg/pubring.gpg' created -gpg: key 3F95D6C6: secret key imported -gpg: /root/.gnupg/trustdb.gpg: trustdb created -gpg: key 3F95D6C6: public key "Citus Data " imported -gpg: Total number processed: 1 -gpg: imported: 1 (RSA: 1) -gpg: secret keys read: 1 -gpg: secret keys imported: 1 -Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-10.0.3.citus-1.el8.x86_64.rpm": -1 packages and 0 specfiles checked; 0 errors, 0 warnings. -Wrote: /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm -Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm": -citus100_12-debugsource.x86_64: W: no-documentation -1 packages and 0 specfiles checked; 0 errors, 1 warnings. -Wrote: /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm -Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm": -1 packages and 0 specfiles checked; 0 errors, 0 warnings. -Removing centos-8/ -Removing citus_package.log -Removing tools/.gitignore -Removing tools/CHANGELOG.md -Removing tools/HomebrewFormula/ -Removing tools/Makefile -Removing tools/README.md -Removing tools/automated_packaging/ -Removing tools/citus_dev/ -Removing tools/dashboard/ -Removing tools/packaging/Makefile -Removing tools/packaging/README.md -Removing tools/packaging/citus_package -Removing tools/travis/ -Removing tools/uncrustify/Makefile -Removing tools/uncrustify/README.md -Removing tools/uncrustify/citus-style.cfg -Removing tools/uncrustify/citus_indent -Removing tools/valgrind/ -The command "build_new_release" exited with 0. -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because a custom condition was not met +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ exit 0 +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +warning: line 345: multiple %files for package 'citus-enterprise100_11' +/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' +sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory +dpkg-buildpackage: warning: using a gain-root-command while being root +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory +configure: WARNING: unrecognized options: --disable-dependency-tracking +WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
+gpg: directory `/root/.gnupg' created +gpg: new configuration file `/root/.gnupg/gpg.conf' created +gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run +gpg: keyring `/root/.gnupg/secring.gpg' created +gpg: keyring `/root/.gnupg/pubring.gpg' created +gpg: key 3F95D6C6: secret key imported +gpg: /root/.gnupg/trustdb.gpg: trustdb created +gpg: key 3F95D6C6: public key "Citus Data " imported +gpg: Total number processed: 1 +gpg: imported: 1 (RSA: 1) +gpg: secret keys read: 1 +gpg: secret keys imported: 1 +Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-10.0.3.citus-1.el8.x86_64.rpm": +1 packages and 0 specfiles checked; 0 errors, 0 warnings. +Wrote: /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm +Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm": +citus100_12-debugsource.x86_64: W: no-documentation +1 packages and 0 specfiles checked; 0 errors, 1 warnings. +Wrote: /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm +Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm": +1 packages and 0 specfiles checked; 0 errors, 0 warnings. +Removing centos-8/ +Removing citus_package.log +Removing tools/.gitignore +Removing tools/CHANGELOG.md +Removing tools/HomebrewFormula/ +Removing tools/Makefile +Removing tools/README.md +Removing tools/automated_packaging/ +Removing tools/citus_dev/ +Removing tools/dashboard/ +Removing tools/packaging/Makefile +Removing tools/packaging/README.md +Removing tools/packaging/citus_package +Removing tools/travis/ +Removing tools/uncrustify/Makefile +Removing tools/uncrustify/README.md +Removing tools/uncrustify/citus-style.cfg +Removing tools/uncrustify/citus_indent +Removing tools/valgrind/ +The command "build_new_release" exited with 0. +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because a custom condition was not met Done. Your build exited with 0. \ No newline at end of file diff --git a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm_success.txt b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm_success.txt index 8f63fc7b..31f3b98a 100644 --- a/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm_success.txt +++ b/packaging_automation/tests/files/packaging_warning/sample_warning_build_output_rpm_success.txt @@ -1,163 +1,163 @@ -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
-+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 -+ exit 0 -tar: Removing leading `//' from member names -+ umask 022 -+ cd //citus-rpm-build -+ cd /citus-rpm-build -+ rm -rf pg_cron-1.3.1 -+ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd pg_cron-1.3.1 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
-+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ make -j2 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' -+ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ mkdir -p /root/rpmbuild/BUILDROOT -+ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -+ cd pg_cron-1.3.1 -+ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -+ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension -+ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md -+ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 -224 blocks -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile '' 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 -+ exit 0 -+ umask 022 -+ cd //citus-rpm-build -+ cd pg_cron-1.3.1 -+ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 -warning: line 345: multiple %files for package 'citus-enterprise100_11' -/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' -sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory -dpkg-buildpackage: warning: using a gain-root-command while being root -/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory -configure: WARNING: unrecognized options: --disable-dependency-tracking -WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
-gpg: directory `/root/.gnupg' created -gpg: new configuration file `/root/.gnupg/gpg.conf' created -gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run -gpg: keyring `/root/.gnupg/secring.gpg' created -gpg: keyring `/root/.gnupg/pubring.gpg' created -gpg: key 3F95D6C6: secret key imported -gpg: /root/.gnupg/trustdb.gpg: trustdb created -gpg: key 3F95D6C6: public key "Citus Data " imported -gpg: Total number processed: 1 -gpg: imported: 1 (RSA: 1) -gpg: secret keys read: 1 -gpg: secret keys imported: 1 -Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-10.0.3.citus-1.el8.x86_64.rpm": -1 packages and 0 specfiles checked; 0 errors, 0 warnings. -Wrote: /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm -Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm": -citus100_12-debugsource.x86_64: W: no-documentation -1 packages and 0 specfiles checked; 0 errors, 1 warnings. -Wrote: /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm -Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm": -1 packages and 0 specfiles checked; 0 errors, 0 warnings. -Removing centos-8/ -Removing citus_package.log -Removing tools/.gitignore -Removing tools/CHANGELOG.md -Removing tools/HomebrewFormula/ -Removing tools/Makefile -Removing tools/README.md -Removing tools/automated_packaging/ -Removing tools/citus_dev/ -Removing tools/dashboard/ -Removing tools/packaging/Makefile -Removing tools/packaging/README.md -Removing tools/packaging/citus_package -Removing tools/travis/ -Removing tools/uncrustify/Makefile -Removing tools/uncrustify/README.md -Removing tools/uncrustify/citus-style.cfg -Removing tools/uncrustify/citus_indent -Removing tools/valgrind/ -The command "build_new_release" exited with 0. -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 -Skipping a deployment with the packagecloud provider because a custom condition was not met +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-10/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/pgsql-10/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_10-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_10 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_10-1.3.1-1.el8.x86_64 ++ exit 0 +tar: Removing leading `//' from member names ++ umask 022 ++ cd //citus-rpm-build ++ cd /citus-rpm-build ++ rm -rf pg_cron-1.3.1 ++ /usr/bin/gzip -dc /citus-rpm-build/36d47bf9f7eb569f43cd98ff426764f59c286508 ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd pg_cron-1.3.1 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ make -j2 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ '[' /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 '!=' / ']' ++ rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +++ dirname /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ mkdir -p /root/rpmbuild/BUILDROOT ++ mkdir /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 ++ cd pg_cron-1.3.1 ++ PATH=/usr/pgsql-11/bin:/scripts:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ++ /usr/bin/make install DESTDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 'INSTALL=/usr/bin/install -p' ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension ++ /usr/bin/cp README.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/pgsql-11/doc/extension/README-pg_cron.md ++ /usr/lib/rpm/find-debuginfo.sh -j2 --strict-build-id -m -i --build-id-seed 1.3.1-1.el8 --unique-debug-suffix -1.3.1-1.el8.x86_64 --unique-debug-src-base pg_cron_11-1.3.1-1.el8.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list //citus-rpm-build/pg_cron-1.3.1 +224 blocks ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile '' 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ DOCDIR=/root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ cp -pr CHANGELOG.md /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64/usr/share/doc/pg_cron_11 ++ exit 0 ++ umask 022 ++ cd //citus-rpm-build ++ cd pg_cron-1.3.1 ++ /usr/bin/rm -rf /root/rpmbuild/BUILDROOT/pg_cron_11-1.3.1-1.el8.x86_64 +warning: line 345: multiple %files for package 'citus-enterprise100_11' +/usr/lib/postgresql/12/lib/pgxs/src/makefiles/pgxs.mk:433: warning: ignoring old recipe for target 'check' +sh: warning: setlocale: LC_ALL: cannot change locale (C.utf8): No such file or directory +dpkg-buildpackage: warning: using a gain-root-command while being root +/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /etc/ld.so.conf: No such file or directory +configure: WARNING: unrecognized options: --disable-dependency-tracking +WARNING: If you run into issues during linking or bitcode compilation, you can use --without-security-flags. 
+gpg: directory `/root/.gnupg' created +gpg: new configuration file `/root/.gnupg/gpg.conf' created +gpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run +gpg: keyring `/root/.gnupg/secring.gpg' created +gpg: keyring `/root/.gnupg/pubring.gpg' created +gpg: key 3F95D6C6: secret key imported +gpg: /root/.gnupg/trustdb.gpg: trustdb created +gpg: key 3F95D6C6: public key "Citus Data " imported +gpg: Total number processed: 1 +gpg: imported: 1 (RSA: 1) +gpg: secret keys read: 1 +gpg: secret keys imported: 1 +Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-10.0.3.citus-1.el8.x86_64.rpm": +1 packages and 0 specfiles checked; 0 errors, 0 warnings. +Wrote: /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm +Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debugsource-10.0.3.citus-1.el8.x86_64.rpm": +citus100_12-debugsource.x86_64: W: no-documentation +1 packages and 0 specfiles checked; 0 errors, 1 warnings. +Wrote: /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm +Executing "/usr/bin/rpmlint -f /rpmlintrc /citus-rpm-build/x86_64/citus100_12-debuginfo-10.0.3.citus-1.el8.x86_64.rpm": +1 packages and 0 specfiles checked; 0 errors, 0 warnings. +Removing centos-8/ +Removing citus_package.log +Removing tools/.gitignore +Removing tools/CHANGELOG.md +Removing tools/HomebrewFormula/ +Removing tools/Makefile +Removing tools/README.md +Removing tools/automated_packaging/ +Removing tools/citus_dev/ +Removing tools/dashboard/ +Removing tools/packaging/Makefile +Removing tools/packaging/README.md +Removing tools/packaging/citus_package +Removing tools/travis/ +Removing tools/uncrustify/Makefile +Removing tools/uncrustify/README.md +Removing tools/uncrustify/citus-style.cfg +Removing tools/uncrustify/citus_indent +Removing tools/valgrind/ +The command "build_new_release" exited with 0. +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because this branch is not permitted: redhat-cron-1.3.1 +Skipping a deployment with the packagecloud provider because a custom condition was not met Done. Your build exited with 0. \ No newline at end of file diff --git a/packaging_automation/tests/files/pkgvars b/packaging_automation/tests/files/pkgvars index 4669bfb9..cd0aaace 100644 --- a/packaging_automation/tests/files/pkgvars +++ b/packaging_automation/tests/files/pkgvars @@ -1,5 +1,5 @@ -pkgname=citus -pkgdesc='Citus (Open-Source)' -pkglatest=10.0.2-1 -nightlyref=main -versioning=fancy +pkgname=citus +pkgdesc='Citus (Open-Source)' +pkglatest=10.0.2-1 +nightlyref=main +versioning=fancy diff --git a/packaging_automation/tests/files/postgres-matrix.yml b/packaging_automation/tests/files/postgres-matrix.yml index 5bb5d396..e9d4744a 100644 --- a/packaging_automation/tests/files/postgres-matrix.yml +++ b/packaging_automation/tests/files/postgres-matrix.yml @@ -1,17 +1,17 @@ -### THIS FILE IS ONLY FOR UNIT TESTS. ACTUAL POSTGRES MATRIX FILES ARE IN PACKAGING PROJECT BRANCHES ### -name: Postgres Version Matrix -project_name: citus # alternatives: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise -# There is one configuration like this for each project in packaging repo -# i.e. in all-citus, all-enterprise, all-pgautofailover, all-pgautofailover-enterprise, pgxn-citus etc. 
-version_matrix: - - 8.0: - postgres_versions: [10, 11] - - 9.0: - postgres_versions: [11, 12] - - 9.5: - postgres_versions: [11, 12, 13] - # If 10.0 is released, since it is between 10.1 and 9.5, 9.5 support will be effective for 10.0.x releases - - 10.1: - postgres_versions: [12, 13] - - 10.2: - postgres_versions: [12, 13, 14] +### THIS FILE IS ONLY FOR UNIT TESTS. ACTUAL POSTGRES MATRIX FILES ARE IN PACKAGING PROJECT BRANCHES ### +name: Postgres Version Matrix +project_name: citus # alternatives: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise +# There is one configuration like this for each project in packaging repo +# i.e. in all-citus, all-enterprise, all-pgautofailover, all-pgautofailover-enterprise, pgxn-citus etc. +version_matrix: + - 8.0: + postgres_versions: [10, 11] + - 9.0: + postgres_versions: [11, 12] + - 9.5: + postgres_versions: [11, 12, 13] + # If 10.0 is released, since it is between 10.1 and 9.5, 9.5 support will be effective for 10.0.x releases + - 10.1: + postgres_versions: [12, 13] + - 10.2: + postgres_versions: [12, 13, 14] diff --git a/packaging_automation/tests/files/postgres-matrix/postgres-matrix-success.yml b/packaging_automation/tests/files/postgres-matrix/postgres-matrix-success.yml index 75a03e00..21ba54ea 100644 --- a/packaging_automation/tests/files/postgres-matrix/postgres-matrix-success.yml +++ b/packaging_automation/tests/files/postgres-matrix/postgres-matrix-success.yml @@ -1,16 +1,16 @@ -name: Postgres Version Matrix -project_name: citus # alternatives: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise -# There will be one configuration like this for each project in packaging repo -# i.e. in all-citus, all-enterprise,all-pgautofailover,all-pgautofailover-enterprise, pgxn-citus etc. -version_matrix: - - 8.0: - postgres_versions: [10, 11] - - 9.0: - postgres_versions: [11, 12] - - 9.5: - postgres_versions: [11, 12, 13] - # If 10.0 is released, since it is between 10.1 and 9.5, 9.5 support will be effective for 10.0.x releases - - 10.1: - postgres_versions: [12, 13] - - 10.2: - postgres_versions: [12, 13, 14] +name: Postgres Version Matrix +project_name: citus # alternatives: citus, citus-enterprise, pg-auto-failover, pg-auto-failover-enterprise +# There will be one configuration like this for each project in packaging repo +# i.e. in all-citus, all-enterprise,all-pgautofailover,all-pgautofailover-enterprise, pgxn-citus etc. 
+version_matrix: + - 8.0: + postgres_versions: [10, 11] + - 9.0: + postgres_versions: [11, 12] + - 9.5: + postgres_versions: [11, 12, 13] + # If 10.0 is released, since it is between 10.1 and 9.5, 9.5 support will be effective for 10.0.x releases + - 10.1: + postgres_versions: [12, 13] + - 10.2: + postgres_versions: [12, 13, 14] diff --git a/packaging_automation/tests/files/verify/debian_changelog_with_10.2.4.txt b/packaging_automation/tests/files/verify/debian_changelog_with_10.2.4.txt index ae65c8bb..99c46a00 100644 --- a/packaging_automation/tests/files/verify/debian_changelog_with_10.2.4.txt +++ b/packaging_automation/tests/files/verify/debian_changelog_with_10.2.4.txt @@ -1,2161 +1,2161 @@ -citus (10.2.4.citus-1) stable; urgency=low - - * Official 10.2.4 release of Citus - - -- Gurkan Indibay Tue, 01 Feb 2022 12:00:47 +0000 - -citus (10.1.4.citus-1) stable; urgency=low - - * Official 10.1.4 release of Citus - - -- Gurkan Indibay Tue, 01 Feb 2022 11:49:11 +0000 - -citus (10.2.3.citus-1) stable; urgency=low - - * Official 10.2.3 release of Citus - - -- Gurkan Indibay Mon, 29 Nov 2021 11:00:41 +0000 - -citus (10.0.6.citus-1) stable; urgency=low - - * Official 10.0.6 release of Citus - - -- Gurkan Indibay Fri, 12 Nov 2021 11:37:25 +0000 - -citus (9.5.10.citus-1) stable; urgency=low - - * Official 9.5.10 release of Citus - - -- Gurkan Indibay Mon, 08 Nov 2021 14:18:56 +0000 - -citus (9.2.8.citus-1) stable; urgency=low - - * Official 9.2.8 release of Citus - - -- Gurkan Indibay Thu, 04 Nov 2021 12:45:09 +0000 - -citus (9.2.7.citus-1) stable; urgency=low - - * Official 9.2.7 release of Citus - - -- Gurkan Indibay Wed, 03 Nov 2021 09:34:30 +0000 - -citus (10.2.2.citus-1) stable; urgency=low - - * Official 10.2.2 release of Citus - - -- Gurkan Indibay Thu, 14 Oct 2021 13:00:27 +0000 - -citus (10.2.1.citus-1) stable; urgency=low - - * Adds missing version-mismatch checks for columnar tables - - * Adds missing version-mismatch checks for internal functions - - * Fixes a bug that could cause partition shards being not co-located with - parent shards - - * Fixes a bug that prevents pushing down boolean expressions when using - columnar custom scan - - * Fixes a clog lookup failure that could occur when writing to a columnar - table - - * Fixes an issue that could cause unexpected errors when there is an - in-progress write to a columnar table - - * Revokes read access to `columnar.chunk` from unprivileged user - - -- Gurkan Indibay Fri, 24 Sep 2021 12:03:35 +0000 - -citus (10.1.3.citus-1) stable; urgency=low - - * Official 10.1.3 release of Citus - - -- Gurkan Indibay Fri, 17 Sep 2021 17:23:05 +0000 - -citus (10.2.0.citus-1) stable; urgency=low - - * Adds PostgreSQL 14 support - - * Adds hash & btree index support for columnar tables - - * Adds helper UDFs for easy time partition management: - `get_missing_time_partition_ranges`, `create_time_partitions`, and - `drop_old_time_partitions` - - * Adds propagation of ALTER SEQUENCE - - * Adds support for ALTER INDEX ATTACH PARTITION - - * Adds support for CREATE INDEX ON ONLY - - * Allows more graceful failovers when replication factor > 1 - - * Enables chunk group filtering to work with Params for columnar tables - - * Enables qual push down for joins including columnar tables - - * Enables transferring of data using binary encoding by default on PG14 - - * Improves `master_update_table_statistics` and provides distributed deadlock - detection - - * Includes `data_type` and `cache` in sequence definition on worker - - * Makes 
start/stop_metadata_sync_to_node() transactional - - * Makes sure that table exists before updating table statistics - - * Prevents errors with concurrent `citus_update_table_statistics` and DROP table - - * Reduces memory usage of columnar table scans by freeing the memory used for - last stripe read - - * Shows projected columns for columnar tables in EXPLAIN output - - * Speeds up dropping partitioned tables - - * Synchronizes hasmetadata flag on mx workers - - * Uses current user while syncing metadata - - * Adds a parameter to cleanup metadata when metadata syncing is stopped - - * Fixes a bug about int and smallint sequences on MX - - * Fixes a bug that cause partitions to have wrong distribution key after - DROP COLUMN - - * Fixes a bug that caused `worker_append_table_to_shard` to write as superuser - - * Fixes a bug that caused `worker_create_or_alter_role` to crash with NULL input - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that may cause crash while aborting transaction - - * Fixes a bug that prevents attaching partitions when colocated foreign key - exists - - * Fixes a bug with `nextval('seq_name'::text)` - - * Fixes a crash in shard rebalancer when no distributed tables exist - - * Fixes a segfault caused by use after free in when using a cached connection - - * Fixes a UNION pushdown issue - - * Fixes a use after free issue that could happen when altering a distributed - table - - * Fixes showing target shard size in the rebalance progress monitor - - -- Gurkan Indibay Thu, 16 Sep 2021 08:45:17 +0000 - -citus (10.1.2.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - -- Gurkan Indibay Tue, 17 Aug 2021 16:25:14 +0000 - -citus (10.0.5.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Tue, 17 Aug 2021 14:42:54 +0000 - -citus (9.5.7.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Fixes a bug that causes partitions to have wrong distribution key after - `DROP COLUMN` - - * Improves master_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Tue, 17 Aug 2021 13:18:11 +0000 - -citus (9.4.6.citus-1) stable; urgency=low - - * Allows more graceful failovers when replication factor > 1 - - * Improves master_update_table_statistics and provides distributed deadlock - detection - - -- Gurkan Indibay Wed, 11 Aug 2021 08:57:04 +0000 - -citus (10.1.1.citus-1) stable; urgency=low - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - * Fixes showing target shard size in the rebalance progress monitor - - -- Gurkan Indibay Fri, 06 Aug 2021 08:38:37 +0000 - -citus (10.1.0.citus-1) stable; urgency=low - - * Drops support for PostgreSQL 11 - - * Adds `shard_count` parameter to `create_distributed_table` function - - * Adds support for `ALTER DATABASE OWNER` - - * Adds support for temporary columnar tables - - * Adds support for using sequences as column default values when syncing - metadata - - * `alter_columnar_table_set` enforces columnar table option constraints - - * Continues to remove shards after failure in 
`DropMarkedShards` - - * Deprecates the `citus.replication_model` GUC - - * Enables `citus.defer_drop_after_shard_move` by default - - * Ensures free disk space before moving a shard - - * Fetches shard size on the fly for the rebalance monitor - - * Ignores old placements when disabling or removing a node - - * Implements `improvement_threshold` at shard rebalancer moves - - * Improves orphaned shard cleanup logic - - * Improves performance of `citus_shards` - - * Introduces `citus.local_hostname` GUC for connections to the current node - - * Makes sure connection is closed after each shard move - - * Makes sure that target node in shard moves is eligible for shard move - - * Optimizes partitioned disk size calculation for shard rebalancer - - * Prevents connection errors by properly terminating connections - - * Prevents inheriting a distributed table - - * Prevents users from dropping & truncating known shards - - * Pushes down `VALUES` clause as long as not in outer part of a `JOIN` - - * Reduces memory usage for multi-row inserts - - * Reduces memory usage while rebalancing shards - - * Removes length limits around partition names - - * Removes dependencies on the existence of public schema - - * Executor avoids opening extra connections - - * Excludes orphaned shards while finding shard placements - - * Preserves access method of materialized views when undistributing - or altering distributed tables - - * Fixes a bug that allowed moving of shards belonging to a reference table - - * Fixes a bug that can cause a crash when DEBUG4 logging is enabled - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that causes worker_create_or_alter_role to crash with NULL input - - * Fixes a bug where foreign key to reference table was disallowed - - * Fixes a bug with local cached plans on tables with dropped columns - - * Fixes data race in `get_rebalance_progress` - - * Fixes `FROM ONLY` queries on partitioned tables - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - * Fixes error message for local table joins - - * Fixes issues caused by omitting public schema in queries - - * Fixes nested `SELECT` query with `UNION` bug - - * Fixes null relationName bug at parallel execution - - * Fixes possible segfaults when using Citus in the middle of an upgrade - - * Fixes problems with concurrent calls of `DropMarkedShards` - - * Fixes shared dependencies that are not resident in a database - - * Fixes stale hostnames bug in prepared statements after `master_update_node` - - * Fixes the relation size bug during rebalancing - - * Fixes two race conditions in the get_rebalance_progress - - * Fixes using 2PC when it might be necessary - - -- Gurkan Indibay Fri, 16 Jul 2021 15:37:21 +0000 - -citus (10.0.4.citus-1) stable; urgency=low - - * Introduces `citus.local_hostname` GUC for connections to the current node - - * Removes dependencies on the existence of public schema - - * Removes limits around long partition names - - * Fixes a bug that can cause a crash when DEBUG4 logging is enabled - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - * Fixes FROM ONLY queries on partitioned tables - - * Fixes issues caused by public schema being omitted in queries - - * Fixes problems with concurrent calls of DropMarkedShards - - * Fixes relname null bug when using parallel execution - - * Fixes two race conditions in the 
get_rebalance_progress - - -- Gurkan Indibay Fri, 16 Jul 2021 10:58:55 +0000 - -citus (9.5.6.citus-1) stable; urgency=low - - * Fixes minor bug in `citus_prepare_pg_upgrade` that caused it to lose its - idempotency - - -- Gurkan Fri, 09 Jul 2021 13:30:42 +0000 - -citus (9.4.5.citus-1) stable; urgency=low - - * Adds a configure flag to enforce security - - * Avoids re-using connections for intermediate results - - * Fixes a bug that causes pruning incorrect shard of a range distributed table - - * Fixes a bug that might cause self-deadlocks when COPY used in TX block - - * Fixes an issue that could cause citus_finish_pg_upgrade to fail - - -- Gurkan Thu, 08 Jul 2021 10:15:23 +0000 - -citus (10.0.3.citus-1) stable; urgency=low - - * Prevents infinite recursion for queries that involve UNION ALL - below `JOIN` - - * Fixes a crash in queries with a modifying CTE and a SELECT - without `FROM` - - * Fixes upgrade and downgrade paths for citus_update_table_statistics - - * Fixes a bug that causes SELECT queries to use 2PC unnecessarily - - * Fixes a bug that might cause self-deadlocks with - `CREATE INDEX` / `REINDEX CONCURRENTLY` commands - - * Adds citus.max_cached_connection_lifetime GUC to set maximum connection - lifetime - - * Adds citus.remote_copy_flush_threshold GUC that controls - per-shard memory usages by `COPY` - - * Adds citus_get_active_worker_nodes UDF to deprecate - `master_get_active_worker_nodes` - - * Skips 2PC for readonly connections in a transaction - - * Makes sure that local execution starts coordinated transaction - - * Removes open temporary file warning when cancelling a query with - an open tuple store - - * Relaxes the locks when adding an existing node - - -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 - -citus (10.0.2.citus-1) stable; urgency=low - - * Adds a configure flag to enforce security - - * Fixes a bug due to cross join without target list - - * Fixes a bug with UNION ALL on PG 13 - - * Fixes a compatibility issue with pg_audit in utility calls - - * Fixes insert query with CTEs/sublinks/subqueries etc - - * Grants SELECT permission on citus_tables view to public - - * Grants SELECT permission on columnar metadata tables to public - - * Improves citus_update_table_statistics and provides distributed deadlock - detection - - * Preserves colocation with procedures in alter_distributed_table - - * Prevents using alter_columnar_table_set and alter_columnar_table_reset - on a columnar table not owned by the user - - * Removes limits around long table names - - -- Gurkan Indibay Thu, 4 Mar 2021 2:46:54 +0000 - -citus (9.5.2.citus-1) stable; urgency=low - - * Fixes distributed deadlock detection being blocked by metadata sync - - * Prevents segfaults when SAVEPOINT handling cannot recover from connection - failures - - * Fixes possible issues that might occur with single shard distributed tables - - -- gurkanindibay Wed, 27 Jan 2021 11:25:38 +0000 - -citus (9.4.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause router queries with local tables to be pushed - down - - * Fixes a segfault in connection management due to invalid connection hash - entries - - * Fixes possible issues that might occur with single shard distributed tables - - -- gurkanindibay Tue, 5 Jan 2021 14:58:56 +0000 - -citus (9.5.1.citus-1) stable; urgency=low - - * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE - - * Fixes a bug that could cause excessive memory consumption when a partition is - created - - * Fixes a bug that triggers subplan executions unnecessarily 
with cursors - - * Fixes a segfault in connection management due to invalid connection hash - entries - - -- Onur Tirtir Wed, 2 Dec 2020 14:28:44 +0000 - -citus (9.4.3.citus-1) stable; urgency=low - - * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE - - * Fixes a bug that triggers subplan executions unnecessarily with cursors - - -- Onur Tirtir Tue, 24 Nov 2020 11:17:57 +0000 - -citus (9.5.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 13 - - * Removes the task-tracker executor - - * Introduces citus local tables - - * Introduces undistribute_table UDF to convert tables back to postgres tables - - * Adds support for EXPLAIN (ANALYZE) EXECUTE and EXPLAIN EXECUTE - - * Adds support for EXPLAIN (ANALYZE, WAL) for PG13 - - * Sorts the output of EXPLAIN (ANALYZE) by execution duration. - - * Adds support for CREATE TABLE ... USING table_access_method - - * Adds support for WITH TIES option in SELECT and INSERT SELECT queries - - * Avoids taking multi-shard locks on workers - - * Enforces citus.max_shared_pool_size config in COPY queries - - * Enables custom aggregates with multiple parameters to be executed on workers - - * Enforces citus.max_intermediate_result_size in local execution - - * Improves cost estimation of INSERT SELECT plans - - * Introduces delegation of procedures that read from reference tables - - * Prevents pull-push execution for simple pushdownable subqueries - - * Improves error message when creating a foreign key to a local table - - * Makes citus_prepare_pg_upgrade idempotent by dropping transition tables - - * Disallows ON TRUE outer joins with reference & distributed tables when - reference table is outer relation to avoid incorrect results - - * Disallows field indirection in INSERT/UPDATE queries to avoid incorrect - results - - * Disallows volatile functions in UPDATE subqueries to avoid incorrect results - - * Fixes CREATE INDEX CONCURRENTLY crash with local execution - - * Fixes citus_finish_pg_upgrade to drop all backup tables - - * Fixes a bug that cause failures when RECURSIVE VIEW joined reference table - - * Fixes DROP SEQUENCE failures when metadata syncing is enabled - - * Fixes a bug that caused CREATE TABLE with CHECK constraint to fail - - * Fixes a bug that could cause VACUUM to deadlock - - * Fixes master_update_node failure when no background worker slots are available - - * Fixes a bug that caused replica identity to not be propagated on shard repair - - * Fixes a bug that could cause crashes after connection timeouts - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug that could cause deadlocks on CREATE INDEX - - * Fixes a bug with genetic query optimization in outer joins - - * Fixes a crash when aggregating empty tables - - * Fixes a crash with inserting domain constrained composite types - - * Fixes a crash with multi-row & router INSERT's in local execution - - * Fixes a possibility of doing temporary file cleanup more than once - - * Fixes incorrect setting of join related fields - - * Fixes memory issues around deparsing index commands - - * Fixes reference table access tracking for sequential execution - - * Fixes removal of a single node with only reference tables - - * Fixes sending commands to coordinator when it is added as a worker - - * Fixes write queries with const expressions and COLLATE in various places - - * Fixes wrong cancellation message about distributed deadlock - - -- Onur Tirtir Wed, 11 Nov 2020 15:00:27 +0000 - -citus (9.4.2.citus-1) stable; urgency=low - - * 
Fixes a bug that could lead to multiple maintenance daemons - - * Fixes an issue preventing views in reference table modifications - - -- Onur Tirtir Thu, 22 Oct 2020 8:53:44 +0000 - -citus (9.4.1.citus-1) stable; urgency=low - - * Fixes EXPLAIN ANALYZE output truncation - - * Fixes a deadlock during transaction recovery - - -- Onur Tirtir Wed, 30 Sep 2020 9:33:46 +0000 - -citus (9.4.0.citus-1) stable; urgency=low - - * Improves COPY by honoring max_adaptive_executor_pool_size config - - * Adds support for insert into local table select from distributed table - - * Adds support to partially push down tdigest aggregates - - * Adds support for receiving binary encoded results from workers using - citus.enable_binary_protocol - - * Enables joins between local tables and CTEs - - * Adds showing query text in EXPLAIN output when explain verbose is true - - * Adds support for showing CTE statistics in EXPLAIN ANALYZE - - * Adds support for showing amount of data received in EXPLAIN ANALYZE - - * Introduces downgrade paths in migration scripts - - * Avoids returning incorrect results when changing roles in a transaction - - * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug - - * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug with lists of configuration values in ALTER ROLE SET statements - - * Fixes a bug that occurs when coordinator is added as a worker node - - * Fixes a crash because of overflow in partition id with certain compile flags - - * Fixes a crash that may happen if no worker nodes are added - - * Fixes a crash that occurs when inserting implicitly coerced constants - - * Fixes a crash when aggregating empty tables - - * Fixes a memory leak in subtransaction memory handling - - * Fixes crash when using rollback to savepoint after cancellation of DML - - * Fixes deparsing for queries with anonymous column references - - * Fixes distribution of composite types failing to include typemods - - * Fixes explain analyze on adaptive executor repartitions - - * Fixes possible error throwing in abort handle - - * Fixes segfault when evaluating func calls with default params on coordinator - - * Fixes several EXPLAIN ANALYZE issues - - * Fixes write queries with const expressions and COLLATE in various places - - * Fixes wrong cancellation message about distributed deadlocks - - * Reports correct INSERT/SELECT method in EXPLAIN - - * Disallows triggers on citus tables - - -- Onur Tirtir Tue, 28 Jul 2020 13:22:31 +0000 - -citus (9.3.5.citus-1) stable; urgency=low - - * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug - - * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug - - * Fixes a crash because of overflow in partition id with certain compile flags - - -- Onur Tirtir Mon, 27 Jul 2020 7:28:18 +0000 - -citus (9.3.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause crashes with certain compile flags - - * Fixes a bug with lists of configuration values in ALTER ROLE SET statements - - * Fixes deparsing for queries with anonymous column references - - -- Onur Tirtir Wed, 22 Jul 2020 9:00:01 +0000 - -citus (9.3.3.citus-1) stable; urgency=low - - * Fixes a memory leak in subtransaction memory handling - - -- Onur Tirtir Mon, 13 Jul 2020 8:47:40 +0000 - -citus (9.3.0.citus-1) stable; urgency=low - - * Adds max_shared_pool_size to control number of connections across sessions - - * Adds support for window 
functions on coordinator - - * Improves shard pruning logic to understand OR-conditions - - * Prevents using an extra connection for intermediate result multi-casts - - * Adds propagation of ALTER ROLE .. SET statements - - * Adds update_distributed_table_colocation UDF to update colocation of tables - - * Introduces a UDF to truncate local data after distributing a table - - * Adds support for creating temp schemas in parallel - - * Adds support for evaluation of nextval in the target list on coordinator - - * Adds support for local execution of COPY/TRUNCATE/DROP/DDL commands - - * Adds support for local execution of shard creation - - * Uses local execution in a transaction block - - * Adds support for querying distributed table sizes concurrently - - * Allows master_copy_shard_placement to replicate placements to new nodes - - * Allows table type to be used in target list - - * Avoids having multiple maintenance daemons active for a single database - - * Defers reference table replication to shard creation time - - * Enables joins between local tables and reference tables in transaction blocks - - * Ignores pruned target list entries in coordinator plan - - * Improves SIGTERM handling of maintenance daemon - - * Increases the default of citus.node_connection_timeout to 30 seconds - - * Fixes a bug that occurs when creating remote tasks in local execution - - * Fixes a bug that causes some DML queries containing aggregates to fail - - * Fixes a bug that could cause failures in queries with subqueries or CTEs - - * Fixes a bug that may cause some connection failures to throw errors - - * Fixes a bug which caused queries with SRFs and function evaluation to fail - - * Fixes a bug with generated columns when executing COPY dist_table TO file - - * Fixes a crash when using non-constant limit clauses - - * Fixes a failure when composite types used in prepared statements - - * Fixes a possible segfault when dropping dist. 
table in a transaction block - - * Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING - - * Fixes a segfault when executing queries using GROUPING - - * Fixes an error when using LEFT JOIN with GROUP BY on primary key - - * Fixes an issue with distributing tables having generated cols not at the end - - * Fixes automatic SSL permission issue when using "initdb --allow-group-access" - - * Fixes errors which could occur when subqueries are parameters to aggregates - - * Fixes possible issues by invalidating the plan cache in master_update_node - - * Fixes timing issues which could be caused by changing system clock - - -- Onur Tirtir Thu, 7 May 2020 15:11:25 +0000 - -citus (9.2.4.citus-1) stable; urgency=low - - * Fixes a release problem in 9.2.3 - - -- Onur Tirtir Tue, 31 Mar 2020 08:06:59 +0000 - -citus (9.2.3.citus-1) stable; urgency=low - - * Do not use C functions that have been banned by Microsoft - - * Fixes a bug that causes wrong results with complex outer joins - - * Fixes issues found using static analysis - - * Fixes left join shard pruning in pushdown planner - - * Fixes possibility for segmentation fault in internal aggregate functions - - * Fixes possible segfault when non pushdownable aggregates are used in HAVING - - * Improves correctness of planning subqueries in HAVING - - * Prevents using old connections for security if citus.node_conninfo changed - - * Uses Microsoft approved cipher string for default TLS setup - - -- Onur Tirtir Thu, 26 Mar 2020 8:22:48 +0000 - -citus (9.0.2.citus-1) stable; urgency=low - - * Fixes build errors on EL/OL 6 based distros - - * Fixes a bug that caused maintenance daemon to fail on standby nodes - - * Disallows distributed function creation when replication_model is `statement` - - -- Onur Tirtir Fri, 6 Mar 2020 14:10:16 +0000 - -citus (9.2.2.citus-1) stable; urgency=low - - * Fixes a bug that caused some prepared stmts with function calls to fail - - * Fixes a bug that caused some prepared stmts with composite types to fail - - * Fixes a bug that caused missing subplan results in workers - - * Improves performance of re-partition joins - - -- Onur Tirtir Fri, 6 Mar 2020 07:14:20 +0000 - -citus (9.2.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause crashes if distribution key is NULL - - -- Onur Tirtir Fri, 14 Feb 2020 11:51:09 +0000 - -citus (9.2.0.citus-1) stable; urgency=low - - * Adds support for INSERT...SELECT queries with re-partitioning - - * Adds citus.coordinator_aggregation_strategy to support more aggregates - - * Adds caching of local plans on shards for Citus MX - - * Adds compatibility support for dist. object infrastructure from old versions - - * Adds deferring shard-pruning for fast-path router queries to execution - - * Adds propagation of GRANT ... ON SCHEMA queries - - * Adds support for CTE pushdown via CTE inlining in distributed planning - - * Adds support for ALTER TABLE ... SET SCHEMA propagation. - - * Adds support for DROP ROUTINE & ALTER ROUTINE commands - - * Adds support for any inner join on a reference table - - * Changes citus.log_remote_commands level to NOTICE - - * Disallows marking ref. 
table shards unhealthy in the presence of savepoints - - * Disallows placing new shards with shards in TO_DELETE state - - * Enables local execution of queries that do not need any data access - - * Fixes Makefile trying to cleanup PG directory during install - - * Fixes a bug causing errors when planning a query with multiple subqueries - - * Fixes a possible deadlock that could happen during shard moves - - * Fixes a problem when adding a new node due to tables referenced in func body - - * Fixes an issue that could cause joins with reference tables to be slow - - * Fixes cached metadata for shard is inconsistent issue - - * Fixes inserting multiple composite types as partition key in VALUES - - * Fixes unnecessary repartition on joins with more than 4 tables - - * Prevents wrong results for replicated partitioned tables after failure - - * Restricts LIMIT approximation for non-commutative aggregates - - -- Onur Tirtir Wed, 10 Feb 2020 8:48:00 +0000 - -citus (9.1.1.citus-1) stable; urgency=low - - * Fixes a bug causing SQL-executing UDFs to crash when passing in DDL - - * Fixes a bug that caused column_to_column_name to crash for invalid input - - * Fixes a bug that caused inserts into local tables w/ dist. subqueries to crash - - * Fixes a bug that caused some noop DML statements to fail - - * Fixes a bug that prevents dropping reference table columns - - * Fixes a crash in IN (.., NULL) queries - - * Fixes a crash when calling a distributed function from PL/pgSQL - - * Fixes an issue that caused CTEs to sometimes leak connections - - * Fixes strange errors in DML with unreachable sublinks - - * Prevents statements in SQL functions to run outside of a transaction - - -- Onur Tirtir Wed, 18 Dec 2019 14:32:42 +0000 - -citus (9.1.0.citus-1) stable; urgency=low - - * Adds extensions to distributed object propagation infrastructure - - * Adds support for ALTER ROLE propagation - - * Adds support for aggregates in create_distributed_function - - * Adds support for expressions in reference joins - - * Adds support for returning RECORD in multi-shard queries - - * Adds support for simple IN subqueries on unique cols in repartition joins - - * Adds support for subqueries in HAVING clauses - - * Automatically distributes unary aggs w/ combinefunc and non-internal stype - - * Disallows distributed func creation when replication_model is 'statement' - - * Drops support for deprecated real-time and router executors - - * Fixes a bug in local execution that could cause missing rows in RETURNING - - * Fixes a bug that caused maintenance daemon to fail on standby nodes - - * Fixes a bug that caused other CREATE EXTENSION commands to take longer - - * Fixes a bug that prevented REFRESH MATERIALIZED VIEW - - * Fixes a bug when view is used in modify statements - - * Fixes a memory leak in adaptive executor when query returns many columns - - * Fixes underflow init of default values in worker extended op node creation - - * Fixes potential segfault in standard_planner inlining functions - - * Fixes an issue that caused failures in RHEL 6 builds - - * Fixes queries with repartition joins and group by unique column - - * Improves CTE/Subquery performance by pruning intermediate rslt broadcasting - - * Removes citus.worker_list_file GUC - - * Revokes usage from the citus schema from public - - -- Onur Tirtir Thu, 28 Nov 2019 15:11:05 +0000 - -citus (9.0.1.citus-1) stable; urgency=low - - * Fixes a memory leak in the executor - - * Revokes usage from the citus schema from public - - -- Hanefi Onaldi Wed, 30 
Oct 2019 8:53:22 +0000 - -citus (9.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 12 - - * Adds UDFs to help with PostgreSQL upgrades - - * Distributes types to worker nodes - - * Introduces create_distributed_function UDF - - * Introduces local query execution for Citus MX - - * Implements infrastructure for routing CALL to MX workers - - * Implements infrastructure for routing SELECT function() to MX workers - - * Adds support for foreign key constraints between reference tables - - * Adds a feature flag to turn off CREATE TYPE propagation - - * Adds option citus.single_shard_commit_protocol - - * Adds support for EXPLAIN SUMMARY - - * Adds support for GENERATED ALWAYS AS STORED - - * Adds support for serial and smallserial in MX mode - - * Adds support for anon composite types on the target list in router queries - - * Avoids race condition between create_reference_table & master_add_node - - * Fixes a bug in schemas of distributed sequence definitions - - * Fixes a bug that caused run_command_on_colocated_placements to fail - - * Fixes a bug that leads to various issues when a connection is lost - - * Fixes a schema leak on CREATE INDEX statement - - * Fixes assert failure in bare SELECT FROM reference table FOR UPDATE in MX - - * Makes master_update_node MX compatible - - * Prevents pg_dist_colocation from multiple records for reference tables - - * Prevents segfault in worker_partition_protocol edgecase - - * Propagates ALTER FUNCTION statements for distributed functions - - * Propagates CREATE OR REPLACE FUNCTION for distributed functions - - * Propagates REINDEX on tables & indexes - - * Provides a GUC to turn off the new dependency propagation functionality - - * Uses 2PC in adaptive executor when dealing with replication factors above 1 - - -- Hanefi Onaldi Tue, 15 Oct 2019 16:54:50 +0000 - -citus (8.3.2.citus-1) stable; urgency=low - - * Fixes performance issues by skipping unnecessary relation access recordings - - -- Hanefi Onaldi Fri, 9 Aug 2019 11:15:57 +0000 - -citus (8.3.1.citus-1) stable; urgency=low - - * Improves Adaptive Executor performance - - -- Hanefi Onaldi Mon, 29 Jul 2019 10:25:50 +0000 - -citus (8.3.0.citus-1) stable; urgency=low - - * Adds a new distributed executor: Adaptive Executor - - * citus.enable_statistics_collection defaults to off (opt-in) - - * Adds support for CTEs in router planner for modification queries - - * Adds support for propagating SET LOCAL at xact start - - * Adds option to force master_update_node during failover - - * Deprecates master_modify_multiple_shards - - * Improves round robin logic on router queries - - * Creates all distributed schemas as superuser on a separate connection - - * Makes COPY adapt to connection use behaviour of previous commands - - * Replaces SESSION_LIFESPAN with configurable no. 
of connections at xact end - - * Propagates ALTER FOREIGN TABLE commands to workers - - * Don't schedule tasks on inactive nodes - - * Makes DROP/VALIDATE CONSTRAINT tolerant of ambiguous shard extension - - * Fixes an issue with subquery map merge jobs as non-root - - * Fixes null pointers caused by partial initialization of ConnParamsHashEntry - - * Fixes errors caused by joins with shadowed aliases - - * Fixes a regression in outer joining subqueries introduced in 8.2.0 - - * Fixes a crash that can occur under high memory load - - * Fixes a bug that selects wrong worker when using round-robin assignment - - * Fixes savepoint rollback after multi-shard modify/copy failure - - * Fixes bad foreign constraint name search - - * Fixes a bug that prevents stack size to be adjusted - - -- Hanefi Onaldi Wed, 10 Jul 2019 15:19:02 +0000 - -citus (8.2.2.citus-1) stable; urgency=low - - * Fixes a bug in outer joins wrapped in subqueries - - -- Burak Velioglu Wed, 12 Jun 2019 8:45:08 +0000 - -citus (8.2.1.citus-1) stable; urgency=low - - * Fixes a bug that prevents stack size to be adjusted - - -- Burak Velioglu Wed, 3 Apr 2019 20:56:47 +0000 - -citus (8.1.2.citus-1) stable; urgency=low - - * Don't do redundant ALTER TABLE consistency checks at coordinator - - * Fixes a bug that prevents stack size to be adjusted - - * Fix an issue with some DECLARE .. CURSOR WITH HOLD commands - - -- Burak Velioglu Wed, 3 Apr 2019 20:34:46 +0000 - -citus (8.2.0.citus-1) stable; urgency=low - - * Removes support and code for PostgreSQL 9.6 - - * Enable more outer joins with reference tables - - * Execute CREATE INDEX CONCURRENTLY in parallel - - * Treat functions as transaction blocks - - * Add support for column aliases on join clauses - - * Skip standard_planner() for trivial queries - - * Added support for function calls in joins - - * Round-robin task assignment policy relies on local transaction id - - * Relax subquery union pushdown restrictions for reference tables - - * Speed-up run_command_on_shards() - - * Address some memory issues in connection config - - * Restrict visibility of get_*_active_transactions functions to pg_monitor - - * Don't do redundant ALTER TABLE consistency checks at coordinator - - * Queries with only intermediate results do not rely on task assignment policy - - * Finish connection establishment in parallel for multiple connections - - * Fixes a bug related to pruning shards using a coerced value - - * Fix an issue with some DECLARE .. 
CURSOR WITH HOLD commands - - * Fixes a bug that could lead to infinite recursion during recursive planning - - * Fixes a bug that could prevent planning full outer joins with using clause - - * Fixes a bug that could lead to memory leak on citus_relation_size - - * Fixes a problem that could cause segmentation fault with recursive planning - - * Switch CI solution to CircleCI - - -- Burak Velioglu Fri, 29 Mar 2019 07:36:09 +0000 - -citus (8.0.3.citus-1) stable; urgency=low - - * Fixes maintenance daemon panic due to unreleased spinlock - - * Fixes an issue with having clause when used with complex joins - - -- Hanefi Onaldi Wed, 9 Jan 2019 9:50:07 +0000 - -citus (8.1.1.citus-1) stable; urgency=low - - * Fixes maintenance daemon panic due to unreleased spinlock - - * Fixes an issue with having clause when used with complex joins - - -- Hanefi Onaldi Mon, 7 Jan 2019 16:26:13 +0000 - -citus (8.1.0.citus-1) stable; urgency=low - - * Turns on ssl by default for new installations of citus - - * Restricts SSL Ciphers to TLS1.2 and above - - * Adds support for INSERT INTO SELECT..ON CONFLICT/RETURNING via coordinator - - * Adds support for round-robin task assignment for queries to reference tables - - * Adds support for SQL tasks using worker_execute_sql_task UDF with task-tracker - - * Adds support for VALIDATE CONSTRAINT queries - - * Adds support for disabling hash aggregate with HLL - - * Adds user ID suffix to intermediate files generated by task-tracker - - * Only allow transmit from pgsql_job_cache directory - - * Disallows GROUPING SET clauses in subqueries - - * Removes restriction on user-defined group ID in node addition functions - - * Relaxes multi-shard modify locks when enable_deadlock_prevention is disabled - - * Improves security in task-tracker protocol - - * Improves permission checks in internal DROP TABLE functions - - * Improves permission checks in cluster management functions - - * Cleans up UDFs and fixes permission checks - - * Fixes crashes caused by stack size increase under high memory load - - * Fixes a bug that could cause maintenance daemon panic - - -- Burak Velioglu Tue, 18 Dec 2018 15:12:45 +0000 - -citus (8.0.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause maintenance daemon panic - - * Fixes crashes caused by stack size increase under high memory load - - -- Burak Velioglu Thu, 13 Dec 2018 13:56:44 +0000 - -citus (7.5.4.citus-1) stable; urgency=low - - * Fixes a bug that could cause maintenance daemon panic - - -- Burak Velioglu Wed, 12 Dec 2018 11:45:24 +0000 - -citus (8.0.1.citus-1) stable; urgency=low - - * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker - - -- Burak Velioglu Wed, 28 Nov 2018 11:38:47 +0000 - -citus (7.5.3.citus-1) stable; urgency=low - - * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker - - -- Burak Velioglu Wed, 28 Nov 2018 10:52:20 +0000 - -citus (7.5.2.citus-1) stable; urgency=low - - * Fixes inconsistent metadata error when shard metadata caching get interrupted - - * Fixes a bug that could cause memory leak - - * Fixes a bug that prevents recovering wrong transactions in MX - - * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load - - * Fixes crashes caused by stack size increase under high memory load - - -- Burak Velioglu Wed, 14 Nov 2018 20:42:16 +0000 - -citus (8.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 11 - - * Adds support for applying DML operations on reference tables from MX nodes - - * Adds 
distributed locking to truncated MX tables - - * Adds support for running TRUNCATE command from MX worker nodes - - * Adds views to provide insight about the distributed transactions - - * Adds support for TABLESAMPLE in router queries - - * Adds support for INCLUDE option in index creation - - * Adds option to allow simple DML commands from hot standby - - * Adds support for partitioned tables with replication factor > 1 - - * Prevents a deadlock on concurrent DROP TABLE and SELECT on Citus MX - - * Fixes a bug that prevents recovering wrong transactions in MX - - * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load - - * Fixes a bug in MX mode, calling DROP SCHEMA with existing partitioned table - - * Fixes a bug that could cause modifying CTEs to select wrong execution mode - - * Fixes a bug preventing rollback in CREATE PROCEDURE - - * Fixes a bug on not being able to drop index on a partitioned table - - * Fixes a bug on TRUNCATE when there is a foreign key to a reference table - - * Fixes a performance issue in prepared INSERT..SELECT - - * Fixes a bug which causes errors on DROP DATABASE IF EXISTS - - * Fixes a bug to remove intermediate result directory in pull-push execution - - * Improves query pushdown planning performance - - * Evaluate functions anywhere in query - - -- Burak Velioglu Fri, 02 Nov 2018 08:06:42 +0000 - -citus (7.5.1.citus-1) stable; urgency=low - - * Improves query pushdown planning performance - - * Fixes a bug that could cause modifying CTEs to select wrong execution mode - - -- Burak Velioglu Wed, 29 Aug 2018 08:06:42 +0000 - -citus (7.4.2.citus-1) stable; urgency=low - - * Fixes a segfault in real-time executor during online shard move - - -- Mehmet Furkan Sahin Fri, 27 Jul 2018 13:42:27 +0000 - -citus (7.5.0.citus-1) stable; urgency=low - - * Adds foreign key support from hash distributed to reference tables - - * Adds SELECT ... FOR UPDATE support for router plannable queries - - * Adds support for non-partition columns in count distinct - - * Fixes a segfault in real-time executor during online shard move - - * Fixes ALTER TABLE ADD COLUMN constraint check - - * Fixes a bug where INSERT ... SELECT allows one to update dist. column - - * Allows DDL commands to be sequentialized via citus.multi_shard_modify_mode - - * Adds support for topn_union_agg and topn_add_agg across shards - - * Adds support for hll_union_agg and hll_add_agg across shards - - * Fixes a bug that might cause shards to have a wrong owner - - * Adds select_opens_transaction_block GUC - - * Adds utils to implement DDLs for policies in future - - * Makes intermediate results to use separate connections - - * Adds a node_conninfo GUC to set outgoing connection settings - - -- Mehmet Furkan Sahin Wed, 25 Jul 2018 9:32:24 +0000 - -citus (6.2.6.citus-1) stable; urgency=low - - * Adds support for respecting enable_hashagg in the master planner - - -- Burak Velioglu Fri, 06 Jul 2018 13:30:08 +0000 - -citus (7.4.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause txns to incorrectly proceed after failure - - * Fixes a bug on INSERT ... 
SELECT queries in prepared statements - - -- Burak Velioglu Wed, 20 Jun 2018 12:25:30 +0000 - -citus (7.2.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause SELECTs to crash during a rebalance - - -- Burak Velioglu Thu, 17 May 2018 11:51:56 +0000 - -citus (7.4.0.citus-1) stable; urgency=low - - * Adds support for non-pushdownable subqueries and CTEs in UPDATE/DELETE - - * Adds support for pushdownable subqueries and joins in UPDATE/DELETE - - * Adds faster shard pruning for subqueries - - * Adds partitioning support to MX table - - * Adds support for (VACUUM | ANALYZE) VERBOSE - - * Adds support for multiple ANDs in HAVING for pushdown planner - - * Adds support for quotation needy schema names - - * Improves operator check time in physical planner for custom data types - - * Removes broadcast join logic - - * Deprecates large_table_shard_count and master_expire_table_cache() - - * Modifies master_update_node to write-lock shards hosted by node over update - - * DROP TABLE now drops shards as the current user instead of the superuser - - * Adds specialised error codes for connection failures - - * Improves error messages on connection failure - - * Fixes issue which prevented multiple citus_table_size calls per query - - * Tests are updated to use create_distributed_table - - -- Burak Velioglu Tue, 15 May 2018 13:01:17 +0000 - -citus (7.3.0.citus-1) stable; urgency=low - - * Adds support for non-colocated joins between subqueries - - * Adds support for window functions that can be pushed down to worker - - * Adds support for modifying CTEs - - * Adds recursive plan for WHERE clause subqueries with recurring FROM clause - - * Adds support for bool_ and bit_ aggregates - - * Adds support for Postgres jsonb and json aggregation functions - - * Adds support for respecting enable_hashagg in the master plan - - * Performance improvements to reduce distributed planning time - - * Fixes a bug on planner when aggregate is used in ORDER BY - - * Fixes a bug on planner when DISTINCT (ON) clause is used with GROUP BY - - * Fixes a planner bug with distinct and aggregate clauses - - * Fixes a bug that opened new connections on each table size function call - - * Fixes a bug canceling backends not involved in distributed deadlocks - - * Fixes count distinct bug on column expressions when used with subqueries - - * Improves error handling on worker node failures - - * Improves error messages for INSERT queries that have subqueries - - -- Burak Velioglu Thu, 15 Mar 2018 14:16:10 +0000 - -citus (7.2.1.citus-1) stable; urgency=low - - * Fixes count distinct bug on column expressions when used with subqueries - - * Adds support for respecting enable_hashagg in the master plan - - * Fixes a bug canceling backends not involved in distributed deadlocks - - -- Burak Velioglu Tue, 06 Feb 2018 14:46:07 +0000 - -citus (7.2.0.citus-1) stable; urgency=low - - * Adds support for CTEs - - * Adds support for subqueries that require merge step - - * Adds support for set operations (UNION, INTERSECT, ...) 
- - * Adds support for 2PC auto-recovery - - * Adds support for querying local tables in CTEs and subqueries - - * Adds support for more SQL coverage in subqueries for reference tables - - * Adds support for count(distinct) in queries with a subquery - - * Adds support for non-equijoins when there is already an equijoin - - * Adds support for real-time executor to run in transaction blocks - - * Adds infrastructure for storing intermediate distributed query results - - * Adds a new GUC named enable_repartition_joins for auto executor switch - - * Adds support for limiting the intermediate result size - - * Improves support for queries with unions containing filters - - * Improves support for queries with unions containing joins - - * Improves support for subqueries in the WHERE clause - - * Increases COPY throughput - - * Enables pushing down queries containing only recurring tuples and GROUP BY - - * Load-balance queries that read from 0 shards - - * Improves support for using functions in subqueries - - * Fixes a bug that caused real-time executor to crash during cancellation - - * Fixes a bug that caused real-time executor to get stuck on cancellation - - * Fixes a bug that could block modification queries unnecessarily - - * Fixes a bug that could cause assigning wrong IDs to transactions - - * Fixes a bug that could cause an assert failure with ANALYZE statements - - * Fixes a bug that would push down wrong set operations in subqueries - - * Fixes a bug that could cause a deadlock in create_distributed_table - - * Fixes a bug that could confuse user about ANALYZE usage - - * Fixes a bug causing false positive distributed deadlock detections - - * Relaxes the locking for DDL commands on partitioned tables - - * Relaxes the locking on COPY with replication - - * Logs more remote commands when citus.log_remote_commands is set - - -- Burak Velioglu Tue, 16 Jan 2018 14:34:20 +0000 - -citus (6.2.5.citus-1) stable; urgency=low - - * Fixes a bug that could crash the coordinator while reporting a remote error - - -- Burak Velioglu Thu, 11 Jan 2018 11:40:28 +0000 - -citus (7.1.2.citus-1) stable; urgency=low - - * Fixes a bug that could cause assigning wrong IDs to transactions - - * Increases COPY throughput - - -- Burak Velioglu Fri, 05 Jan 2018 09:00:07 +0000 - -citus (7.1.1.citus-1) stable; urgency=low - - * Fixes a bug preventing pushing down subqueries with reference tables - - * Fixes a bug that could create false positive distributed deadlocks - - * Fixes a bug that could prevent running concurrent COPY and multi-shard DDL - - * Fixes a bug that could mislead users about ANALYZE queries - - -- Burak Velioglu Tue, 05 Dec 2017 09:00:07 +0000 - -citus (7.1.0.citus-1) stable; urgency=low - - * Adds support for native queries with multi shard UPDATE/DELETE queries - - * Expands reference table support in subquery pushdown - - * Adds window function support for subqueries and INSERT ... SELECT queries - - * Adds support for COUNT(DISTINCT) [ON] queries on non-partition columns - - * Adds support for DISTINCT [ON] queries on non-partition columns - - * Introduces basic usage statistic collector - - * Adds support for setting replica identity while creating distributed tables - - * Adds support for ALTER TABLE ... REPLICA IDENTITY queries - - * Adds pushdown support for LIMIT and HAVING grouped by partition key - - * Adds support for INSERT ... 
SELECT queries via worker nodes on MX clusters - - * Adds support for adding primary key using already defined index - - * Adds replication parameter to shard copy functions - - * Changes shard_name UDF to omit public schema name - - * Adds master_move_node UDF to make changes on nodename/nodeport more easy - - * Fixes a bug that could cause casting error with INSERT ... SELECT queries - - * Fixes a bug that could prevent upgrading servers from Citus 6.1 - - * Fixes a bug that could prevent attaching partitions to a table in schema - - * Fixes a bug preventing adding nodes to clusters with reference tables - - * Fixes a bug that could cause a crash with INSERT ... SELECT queries - - * Fixes a bug that could prevent creating a partitioned table on Cloud - - * Implements various performance improvements - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Velioglu Wed, 15 Nov 2017 09:00:07 +0000 - -citus (7.0.3.citus-1) stable; urgency=low - - * Fixes several bugs that could cause crash - - * Fixes a bug that could cause deadlock while creating reference tables - - * Fixes a bug that could cause false-positives in deadlock detection - - * Fixes a bug that could cause 2PC recovery not to work from MX workers - - * Fixes a bug that could cause cache incoherency - - * Fixes a bug that could cause maintenance daemon to skip cache invalidations - - * Improves performance of transaction recovery by using correct index - - -- Burak Yucesoy Mon, 16 Oct 2017 11:52:07 +0000 - -citus (7.0.2.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 22:29:01 +0000 - -citus (6.2.4.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 21:31:35 +0000 - -citus (6.1.3.citus-1) stable; urgency=low - - * Updates task-tracker to limit file access - - -- Burak Yucesoy Thu, 28 Sep 2017 20:31:35 +0000 - -citus (7.0.1.citus-1) stable; urgency=low - - * Fixes a bug that could cause memory leaks in INSERT ... SELECT queries - - * Fixes a bug that could cause incorrect execution of prepared statements - - * Fixes a bug that could cause excessive memory usage during COPY - - * Incorporates latest changes from core PostgreSQL code - - -- Burak Yucesoy Tue, 12 Sep 2017 17:53:50 +0000 - -citus (7.0.0.citus-1) stable; urgency=low - - * Adds support for PostgreSQL 10 - - * Drops support for PostgreSQL 9.5 - - * Adds support for multi-row INSERT - - * Adds support for router UPDATE and DELETE queries with subqueries - - * Adds infrastructure for distributed deadlock detection - - * Deprecates enable_deadlock_prevention flag - - * Adds support for partitioned tables - - * Adds support for creating UNLOGGED tables - - * Adds support for SAVEPOINT - - * Adds UDF citus_create_restore_point for taking distributed snapshots - - * Adds support for evaluating non-pushable INSERT ... 
SELECT queries - - * Adds support for subquery pushdown on reference tables - - * Adds shard pruning support for IN and ANY - - * Adds support for UPDATE and DELETE commands that prune down to 0 shard - - * Enhances transaction support by relaxing some transaction restrictions - - * Fixes a bug causing crash if distributed table has no shards - - * Fixes a bug causing crash when removing inactive node - - * Fixes a bug causing failure during COPY on tables with dropped columns - - * Fixes a bug causing failure during DROP EXTENSION - - * Fixes a bug preventing executing VACUUM and INSERT concurrently - - * Fixes a bug in prepared INSERT statements containing an implicit cast - - * Fixes several issues related to statement cancellations and connections - - * Fixes several 2PC related issues - - * Removes an unnecessary dependency causing warning messages in pg_dump - - * Adds internal infrastructure for follower clusters - - * Adds internal infrastructure for progress tracking - - * Implements various performance improvements - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Mon, 28 Aug 2017 12:27:53 +0000 - -citus (6.2.3.citus-1) stable; urgency=low - - * Fixes a crash during execution of local CREATE INDEX CONCURRENTLY - - * Fixes a bug preventing usage of quoted column names in COPY - - * Fixes a bug in prepared INSERTs with implicit cast in partition column - - * Relaxes locks in VACUUM to ensure concurrent execution with INSERT - - -- Burak Yucesoy Thu, 13 Jul 2017 11:27:17 +0000 - -citus (6.2.2.citus-1) stable; urgency=low - - * Fixes a common cause of deadlocks when repairing tables with foreign keys - - -- Burak Velioglu Wed, 07 Jun 2017 09:42:17 +0000 - -citus (6.1.2.citus-1) stable; urgency=low - - * Fixes a common cause of deadlocks when repairing tables with foreign keys - - -- Jason Petersen Wed, 31 May 2017 16:14:11 +0000 - -citus (6.2.1.citus-1) stable; urgency=low - - * Relaxes version-check logic to avoid breaking non-distributed commands - - -- Jason Petersen Wed, 24 May 2017 22:36:07 +0000 - -citus (6.2.0.citus-1) stable; urgency=low - - * Increases SQL subquery coverage by pushing down more kinds of queries - - * Adds CustomScan API support to allow read-only transactions - - * Adds support for CREATE/DROP INDEX CONCURRENTLY - - * Adds support for ALTER TABLE ... ADD CONSTRAINT - - * Adds support for ALTER TABLE ... 
RENAME COLUMN - - * Adds support for DISABLE/ENABLE TRIGGER ALL - - * Adds support for expressions in the partition column in INSERTs - - * Adds support for query parameters in combination with function evaluation - - * Adds support for creating distributed tables from non-empty local tables - - * Adds UDFs to get size of distributed tables - - * Adds UDFs to add a new node without replicating reference tables - - * Adds checks to prevent running Citus binaries with wrong metadata tables - - * Improves shard pruning performance for range queries - - * Improves planner performance for joins involving co-located tables - - * Improves shard copy performance by creating indexes after copy - - * Improves task-tracker performance by batching several status checks - - * Enables router planner for queries on range partitioned table - - * Changes TRUNCATE to drop local data only if enable_ddl_propagation is off - - * Starts to execute DDL on coordinator before workers - - * Fixes a bug causing incorrectly reading invalidated cache - - * Fixes a bug related to creation of schemas in workers with incorrect owner - - * Fixes a bug related to concurrent run of shard drop functions - - * Fixes a bug related to EXPLAIN ANALYZE with DML queries - - * Fixes a bug related to SQL functions in FROM clause - - * Adds a GUC variable to report cross shard queries - - * Fixes a bug related to partition columns without native hash function - - * Adds internal infrastructures and tests to improve development process - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Tue, 16 May 2017 16:05:22 +0000 - -citus (6.1.1.citus-1) stable; urgency=low - - * Fixes a crash caused by router executor use after connection timeouts - - * Fixes a crash caused by relation cache invalidation during COPY - - * Fixes bug related to DDL use within PL/pgSQL functions - - * Fixes a COPY bug related to types lacking binary output functions - - * Fixes a bug related to modifications with parameterized partition values - - * Fixes improper value interpolation in worker sequence generation - - * Guards shard pruning logic against zero-shard tables - - * Fixes possible NULL pointer dereference and buffer underflow, via PVS-Studio - - * Fixes a INSERT...SELECT bug that could push down non-partition column JOINs - - -- Metin Doslu Fri, 5 May 2017 17:42:00 +0000 - -citus (6.1.0.citus-1) stable; urgency=low - - * Implements reference tables, transactionally replicated to all nodes - - * Adds upgrade_to_reference_table UDF to upgrade pre-6.1 reference tables - - * Expands prepared statement support to nearly all statements - - * Adds support for creating VIEWs which reference distributed tables - - * Adds targeted VACUUM/ANALYZE support - - * Adds support for the FILTER clause in aggregate expressions - - * Adds support for function evaluation within INSERT INTO ... 
SELECT - - * Adds support for creating foreign key constraints with ALTER TABLE - - * Adds logic to choose router planner for all queries it supports - - * Enhances create_distributed_table with parameter for explicit colocation - - * Adds generally useful utility UDFs previously available as "Citus Tools" - - * Adds user-facing UDFs for locking shard resources and metadata - - * Refactors connection and transaction management for more consistency - - * Enhances COPY with fully transactional semantics - - * Improves support for cancellation for a number of queries and commands - - * Adds column_to_column_name UDF to help users understand partkey values - - * Adds master_disable_node UDF for temporarily disabling nodes - - * Adds proper MX ("masterless") metadata propagation logic - - * Adds start_metadata_sync_to_node UDF to propagate metadata changes to nodes - - * Enhances SERIAL compatibility with MX tables - - * Adds a node_connection_timeout parameter to set node connection timeouts - - * Adds enable_deadlock_prevention setting to permit multi-node transactions - - * Adds a replication_model setting to specify replication of new tables - - * Changes the shard_replication_factor setting's default value to one - - * Adds code to automatically set max_prepared_transactions if not configured - - * Accelerates lookup of colocated shard placements - - * Fixes a bug affecting INSERT INTO ... SELECT queries using constant values - - * Fixes a bug by ensuring COPY does not mark placements inactive - - * Fixes a bug affecting reads from pg_dist_shard_placement table - - * Fixes a crash triggered by creating a foreign key without a column - - * Fixes a crash related to accessing catalog tables after aborted transaction - - * Fixes a bug affecting JOIN queries requiring repartitions - - * Fixes a bug affecting node insertions to pg_dist_node table - - * Fixes a crash triggered by queries with modifying common table expressions - - * Fixes a bug affecting workloads with concurrent shard appends and deletions - - * Addresses various race conditions and deadlocks - - * Improves and standardizes error messages - - -- Burak Yucesoy Thu, 9 Feb 2017 16:17:41 +0000 - -citus (6.0.1.citus-3) stable; urgency=low - - * First build using new versioning practices - - -- Jason Petersen Wed, 8 Feb 2017 23:19:46 +0000 - -citus (6.0.1.citus-2) stable; urgency=low - - * Transitional package to guide users to new package name - - -- Jason Petersen Mon, 6 Feb 2017 16:33:44 +0000 - -citus (6.0.1.citus-1) stable; urgency=low - - * Fixes a bug causing failures during pg_upgrade - - * Fixes a bug preventing DML queries during colocated table creation - - * Fixes a bug that caused NULL parameters to be incorrectly passed as text - - -- Burak Yucesoy Wed, 30 Nov 2016 15:27:38 +0000 - -citus (6.0.0.citus-1) stable; urgency=low - - * Adds compatibility with PostgreSQL 9.6, now the recommended version - - * Removes the pg_worker_list.conf file in favor of a pg_dist_node table - - * Adds master_add_node and master_remove_node UDFs to manage membership - - * Removes the \stage command and corresponding csql binary in favor of COPY - - * Removes copy_to_distributed_table in favor of first-class COPY support - - * Adds support for multiple DDL statements within a transaction - - * Adds support for certain foreign key constraints - - * Adds support for parallel INSERT INTO ... 
SELECT against colocated tables - - * Adds support for the TRUNCATE command - - * Adds support for HAVING clauses in SELECT queries - - * Adds support for EXCLUDE constraints which include the partition column - - * Adds support for system columns in queries (tableoid, ctid, etc.) - - * Adds support for relation name extension within INDEX definitions - - * Adds support for no-op UPDATEs of the partition column - - * Adds several general-purpose utility UDFs to aid in Citus maintenance - - * Adds master_expire_table_cache UDF to forcibly expire cached shards - - * Parallelizes the processing of DDL commands which affect distributed tables - - * Adds support for repartition jobs using composite or custom types - - * Enhances object name extension to handle long names and large shard counts - - * Parallelizes the master_modify_multiple_shards UDF - - * Changes distributed table creation to error if target table is not empty - - * Changes the pg_dist_shard.logicalrelid column from an oid to regclass - - * Adds a placementid column to pg_dist_shard_placement, replacing Oid use - - * Removes the pg_dist_shard.shardalias distribution metadata column - - * Adds pg_dist_partition.repmodel to track tables using streaming replication - - * Adds internal infrastructure to take snapshots of distribution metadata - - * Addresses the need to invalidate prepared statements on metadata changes - - * Adds a mark_tables_colocated UDF for denoting pre-6.0 manual colocation - - * Fixes a bug affecting prepared statement execution within PL/pgSQL - - * Fixes a bug affecting COPY commands using composite types - - * Fixes a bug that could cause crashes during EXPLAIN EXECUTE - - * Separates worker and master job temporary folders - - * Eliminates race condition between distributed modification and repair - - * Relaxes the requirement that shard repairs also repair colocated shards - - * Implements internal functions to track which tables' shards are colocated - - * Adds pg_dist_partition.colocationid to track colocation group membership - - * Extends shard copy and move operations to respect colocation settings - - * Adds pg_dist_local_group to prepare for future MX-related changes - - * Adds create_distributed_table to easily create shards and infer colocation - - -- Jason Petersen Tue, 8 Nov 2016 19:45:45 +0000 - -citus (5.2.2.citus-1) stable; urgency=low - - * Adds support for IF NOT EXISTS clause of CREATE INDEX command - - * Adds support for RETURN QUERY and FOR ... 
IN PL/pgSQL features - - * Extends the router planner to handle more queries - - * Changes COUNT of zero-row sets to return 0 rather than an empty result - - * Reduces the minimum permitted task_tracker_delay to a single millisecond - - * Fixes a bug that caused crashes during joins with a WHERE false clause - - * Fixes a bug triggered by unique violation errors raised in long txns - - * Fixes a bug resulting in multiple registration of transaction callbacks - - * Fixes a bug which could result in stale reads of distribution metadata - - * Fixes a bug preventing distributed modifications in some PL/pgSQL functions - - * Fixes some code paths that could hypothetically read uninitialized memory - - * Lowers log level of "waiting for activity" messages - - -- Jason Petersen Tue, 8 Nov 2016 18:43:37 +0000 - -citus (5.2.1.citus-1) stable; urgency=low - - * Fixes subquery pushdown to properly extract outer join qualifiers - - * Addresses possible memory leak during multi-shard transactions - - -- Jason Petersen Tue, 6 Sep 2016 20:47:15 +0000 - -citus (5.2.0.citus-1) stable; urgency=low - - * Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required - - * Adds schema support for tables, named objects (types, operators, etc.) - - * Evaluates non-immutable functions on master in all modification commands - - * Adds support for SERIAL types in non-partition columns - - * Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands - - * Adds support for multi-statement transactions using a fixed set of nodes - - * Full SQL support for SELECT queries which can be executed on single worker - - * Adds option to perform DDL changes using prepared transactions (2PC) - - * Adds an enable_ddl_propagation parameter to control DDL propagation - - * Accelerates shard pruning during merges - - * Adds master_modify_multiple_shards UDF to modify many shards at once - - * Adds COPY support for arrays of user-defined types - - * Now supports parameterized prepared statements for certain use cases - - * Extends LIMIT/OFFSET support to all executor types - - * Constraint violations now fail fast rather than hitting all placements - - * Makes master_create_empty_shard aware of shard placement policy - - * Reduces unnecessary sleep during queries processed by real-time executor - - * Improves task tracker executor's task cleanup logic - - * Relaxes restrictions on cancellation of DDL commands - - * Removes ONLY keyword from worker SELECT queries - - * Error message improvements and standardization - - * Moves master_update_shard_statistics function to pg_catalog schema - - * Fixes a bug where hash-partitioned anti-joins could return bad results - - * Now sets storage type correctly for foreign table-backed shards - - * Fixes master_update_shard_statistics issue with hash-partitioned tables - - * Fixes an issue related to extending table names that require escaping - - * Reduces risk of row counter overflows during modifications - - * Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries - - * Fixes crashes related to partition columns with high attribute numbers - - * Fixes certain subquery and join crashes - - * Detects flex for build even if PostgreSQL was built without it - - * Fixes assert-enabled crash when all_modifications_commutative is true - - -- Jason Petersen Wed, 17 Aug 2016 10:23:21 +0000 - -citus (5.2.0~rc.1-1) testing; urgency=low - - * Release candidate for 5.2. 
- - -- Jason Petersen Mon, 01 Aug 2016 17:01:05 +0000 - -citus (5.1.1-1) stable; urgency=low - - * Adds complex count distinct expression support in repartitioned subqueries - - * Improves task tracker job cleanup logic, addressing a memory leak - - * Fixes bug that generated incorrect results for LEFT JOIN queries - - * Improves compatibility with Debian's reproducible builds project - - * Fixes build issues on FreeBSD platforms - - -- Jason Petersen Fri, 17 Jun 2016 16:20:15 +0000 - -citus (5.1.0-1) stable; urgency=low - - * Adds distributed COPY to rapidly populate distributed tables - - * Adds support for using EXPLAIN on distributed queries - - * Recognizes and fast-paths single-shard SELECT statements automatically - - * Increases INSERT throughput via shard pruning optimizations - - * Improves planner performance for joins involving tables with many shards - - * Adds ability to pass columns as arguments to function calls in UPDATEs - - * Introduces transaction manager for use by multi-shard commands - - * Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables - - * Adds support for some UNIQUE indexes on hash- or range-partitioned tables - - * Deprecates \stage in favor of using COPY for append-partition tables - - * Deprecates copy_to_distributed_table in favor of first-class COPY support - - * Fixes build problems when using non-packaged PostgreSQL installs - - * Fixes bug that sometimes skipped pruning when partitioned by VARCHAR column - - * Fixes bug impeding use of user functions in repartitioned subqueries - - * Fixes bug involving queries with equality comparisons of boolean types - - * Fixes crash that prevented use alongside pg_stat_statements - - * Fixes crash arising from SELECT queries that lack a target list - - * Improves warning and error messages - - -- Jason Petersen Tue, 17 May 2016 16:55:02 +0000 - -citus (5.1.0~rc.2-1) testing; urgency=low - - * Fix EXPLAIN output when FORMAT JSON in use - - -- Jason Petersen Mon, 16 May 2016 11:08:09 +0000 - -citus (5.1.0~rc.1-1) testing; urgency=low - - * Release candidate for 5.1. 
- - -- Jason Petersen Wed, 04 May 2016 19:26:23 +0000 - -citus (5.0.1-1) stable; urgency=low - - * Fixes issues on 32-bit systems - - -- Jason Petersen Fri, 15 Apr 2016 19:17:35 +0000 - -citus (5.0.0-1) stable; urgency=low - - * Initial release - - -- Jason Petersen Thu, 24 Mar 2016 10:12:52 -0400 +citus (10.2.4.citus-1) stable; urgency=low + + * Official 10.2.4 release of Citus + + -- Gurkan Indibay Tue, 01 Feb 2022 12:00:47 +0000 + +citus (10.1.4.citus-1) stable; urgency=low + + * Official 10.1.4 release of Citus + + -- Gurkan Indibay Tue, 01 Feb 2022 11:49:11 +0000 + +citus (10.2.3.citus-1) stable; urgency=low + + * Official 10.2.3 release of Citus + + -- Gurkan Indibay Mon, 29 Nov 2021 11:00:41 +0000 + +citus (10.0.6.citus-1) stable; urgency=low + + * Official 10.0.6 release of Citus + + -- Gurkan Indibay Fri, 12 Nov 2021 11:37:25 +0000 + +citus (9.5.10.citus-1) stable; urgency=low + + * Official 9.5.10 release of Citus + + -- Gurkan Indibay Mon, 08 Nov 2021 14:18:56 +0000 + +citus (9.2.8.citus-1) stable; urgency=low + + * Official 9.2.8 release of Citus + + -- Gurkan Indibay Thu, 04 Nov 2021 12:45:09 +0000 + +citus (9.2.7.citus-1) stable; urgency=low + + * Official 9.2.7 release of Citus + + -- Gurkan Indibay Wed, 03 Nov 2021 09:34:30 +0000 + +citus (10.2.2.citus-1) stable; urgency=low + + * Official 10.2.2 release of Citus + + -- Gurkan Indibay Thu, 14 Oct 2021 13:00:27 +0000 + +citus (10.2.1.citus-1) stable; urgency=low + + * Adds missing version-mismatch checks for columnar tables + + * Adds missing version-mismatch checks for internal functions + + * Fixes a bug that could cause partition shards being not co-located with + parent shards + + * Fixes a bug that prevents pushing down boolean expressions when using + columnar custom scan + + * Fixes a clog lookup failure that could occur when writing to a columnar + table + + * Fixes an issue that could cause unexpected errors when there is an + in-progress write to a columnar table + + * Revokes read access to `columnar.chunk` from unprivileged user + + -- Gurkan Indibay Fri, 24 Sep 2021 12:03:35 +0000 + +citus (10.1.3.citus-1) stable; urgency=low + + * Official 10.1.3 release of Citus + + -- Gurkan Indibay Fri, 17 Sep 2021 17:23:05 +0000 + +citus (10.2.0.citus-1) stable; urgency=low + + * Adds PostgreSQL 14 support + + * Adds hash & btree index support for columnar tables + + * Adds helper UDFs for easy time partition management: + `get_missing_time_partition_ranges`, `create_time_partitions`, and + `drop_old_time_partitions` + + * Adds propagation of ALTER SEQUENCE + + * Adds support for ALTER INDEX ATTACH PARTITION + + * Adds support for CREATE INDEX ON ONLY + + * Allows more graceful failovers when replication factor > 1 + + * Enables chunk group filtering to work with Params for columnar tables + + * Enables qual push down for joins including columnar tables + + * Enables transferring of data using binary encoding by default on PG14 + + * Improves `master_update_table_statistics` and provides distributed deadlock + detection + + * Includes `data_type` and `cache` in sequence definition on worker + + * Makes start/stop_metadata_sync_to_node() transactional + + * Makes sure that table exists before updating table statistics + + * Prevents errors with concurrent `citus_update_table_statistics` and DROP table + + * Reduces memory usage of columnar table scans by freeing the memory used for + last stripe read + + * Shows projected columns for columnar tables in EXPLAIN output + + * Speeds up dropping partitioned tables + + * 
Synchronizes hasmetadata flag on mx workers + + * Uses current user while syncing metadata + + * Adds a parameter to cleanup metadata when metadata syncing is stopped + + * Fixes a bug about int and smallint sequences on MX + + * Fixes a bug that cause partitions to have wrong distribution key after + DROP COLUMN + + * Fixes a bug that caused `worker_append_table_to_shard` to write as superuser + + * Fixes a bug that caused `worker_create_or_alter_role` to crash with NULL input + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that may cause crash while aborting transaction + + * Fixes a bug that prevents attaching partitions when colocated foreign key + exists + + * Fixes a bug with `nextval('seq_name'::text)` + + * Fixes a crash in shard rebalancer when no distributed tables exist + + * Fixes a segfault caused by use after free in when using a cached connection + + * Fixes a UNION pushdown issue + + * Fixes a use after free issue that could happen when altering a distributed + table + + * Fixes showing target shard size in the rebalance progress monitor + + -- Gurkan Indibay Thu, 16 Sep 2021 08:45:17 +0000 + +citus (10.1.2.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + -- Gurkan Indibay Tue, 17 Aug 2021 16:25:14 +0000 + +citus (10.0.5.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Tue, 17 Aug 2021 14:42:54 +0000 + +citus (9.5.7.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Fixes a bug that causes partitions to have wrong distribution key after + `DROP COLUMN` + + * Improves master_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Tue, 17 Aug 2021 13:18:11 +0000 + +citus (9.4.6.citus-1) stable; urgency=low + + * Allows more graceful failovers when replication factor > 1 + + * Improves master_update_table_statistics and provides distributed deadlock + detection + + -- Gurkan Indibay Wed, 11 Aug 2021 08:57:04 +0000 + +citus (10.1.1.citus-1) stable; urgency=low + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + * Fixes showing target shard size in the rebalance progress monitor + + -- Gurkan Indibay Fri, 06 Aug 2021 08:38:37 +0000 + +citus (10.1.0.citus-1) stable; urgency=low + + * Drops support for PostgreSQL 11 + + * Adds `shard_count` parameter to `create_distributed_table` function + + * Adds support for `ALTER DATABASE OWNER` + + * Adds support for temporary columnar tables + + * Adds support for using sequences as column default values when syncing + metadata + + * `alter_columnar_table_set` enforces columnar table option constraints + + * Continues to remove shards after failure in `DropMarkedShards` + + * Deprecates the `citus.replication_model` GUC + + * Enables `citus.defer_drop_after_shard_move` by default + + * Ensures free disk space before moving a shard + + * Fetches shard size on the fly for the rebalance monitor + + * Ignores old placements when disabling or removing a node + + * Implements `improvement_threshold` at shard rebalancer moves + + * Improves orphaned shard cleanup logic + + * 
Improves performance of `citus_shards` + + * Introduces `citus.local_hostname` GUC for connections to the current node + + * Makes sure connection is closed after each shard move + + * Makes sure that target node in shard moves is eligible for shard move + + * Optimizes partitioned disk size calculation for shard rebalancer + + * Prevents connection errors by properly terminating connections + + * Prevents inheriting a distributed table + + * Prevents users from dropping & truncating known shards + + * Pushes down `VALUES` clause as long as not in outer part of a `JOIN` + + * Reduces memory usage for multi-row inserts + + * Reduces memory usage while rebalancing shards + + * Removes length limits around partition names + + * Removes dependencies on the existence of public schema + + * Executor avoids opening extra connections + + * Excludes orphaned shards while finding shard placements + + * Preserves access method of materialized views when undistributing + or altering distributed tables + + * Fixes a bug that allowed moving of shards belonging to a reference table + + * Fixes a bug that can cause a crash when DEBUG4 logging is enabled + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes a bug that causes worker_create_or_alter_role to crash with NULL input + + * Fixes a bug where foreign key to reference table was disallowed + + * Fixes a bug with local cached plans on tables with dropped columns + + * Fixes data race in `get_rebalance_progress` + + * Fixes `FROM ONLY` queries on partitioned tables + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + * Fixes error message for local table joins + + * Fixes issues caused by omitting public schema in queries + + * Fixes nested `SELECT` query with `UNION` bug + + * Fixes null relationName bug at parallel execution + + * Fixes possible segfaults when using Citus in the middle of an upgrade + + * Fixes problems with concurrent calls of `DropMarkedShards` + + * Fixes shared dependencies that are not resident in a database + + * Fixes stale hostnames bug in prepared statements after `master_update_node` + + * Fixes the relation size bug during rebalancing + + * Fixes two race conditions in the get_rebalance_progress + + * Fixes using 2PC when it might be necessary + + -- Gurkan Indibay Fri, 16 Jul 2021 15:37:21 +0000 + +citus (10.0.4.citus-1) stable; urgency=low + + * Introduces `citus.local_hostname` GUC for connections to the current node + + * Removes dependencies on the existence of public schema + + * Removes limits around long partition names + + * Fixes a bug that can cause a crash when DEBUG4 logging is enabled + + * Fixes a bug that causes pruning incorrect shard of a range distributed table + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + * Fixes FROM ONLY queries on partitioned tables + + * Fixes issues caused by public schema being omitted in queries + + * Fixes problems with concurrent calls of DropMarkedShards + + * Fixes relname null bug when using parallel execution + + * Fixes two race conditions in the get_rebalance_progress + + -- Gurkan Indibay Fri, 16 Jul 2021 10:58:55 +0000 + +citus (9.5.6.citus-1) stable; urgency=low + + * Fixes minor bug in `citus_prepare_pg_upgrade` that caused it to lose its + idempotency + + -- Gurkan Fri, 09 Jul 2021 13:30:42 +0000 + +citus (9.4.5.citus-1) stable; urgency=low + + * Adds a configure flag to enforce security + + * Avoids re-using connections for intermediate results + + * Fixes a bug that causes 
pruning incorrect shard of a range distributed table + + * Fixes a bug that might cause self-deadlocks when COPY used in TX block + + * Fixes an issue that could cause citus_finish_pg_upgrade to fail + + -- Gurkan Thu, 08 Jul 2021 10:15:23 +0000 + +citus (10.0.3.citus-1) stable; urgency=low + + * Prevents infinite recursion for queries that involve UNION ALL + below `JOIN` + + * Fixes a crash in queries with a modifying CTE and a SELECT + without `FROM` + + * Fixes upgrade and downgrade paths for citus_update_table_statistics + + * Fixes a bug that causes SELECT queries to use 2PC unnecessarily + + * Fixes a bug that might cause self-deadlocks with + `CREATE INDEX` / `REINDEX CONCURRENTLY` commands + + * Adds citus.max_cached_connection_lifetime GUC to set maximum connection + lifetime + + * Adds citus.remote_copy_flush_threshold GUC that controls + per-shard memory usages by `COPY` + + * Adds citus_get_active_worker_nodes UDF to deprecate + `master_get_active_worker_nodes` + + * Skips 2PC for readonly connections in a transaction + + * Makes sure that local execution starts coordinated transaction + + * Removes open temporary file warning when cancelling a query with + an open tuple store + + * Relaxes the locks when adding an existing node + + -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 + +citus (10.0.2.citus-1) stable; urgency=low + + * Adds a configure flag to enforce security + + * Fixes a bug due to cross join without target list + + * Fixes a bug with UNION ALL on PG 13 + + * Fixes a compatibility issue with pg_audit in utility calls + + * Fixes insert query with CTEs/sublinks/subqueries etc + + * Grants SELECT permission on citus_tables view to public + + * Grants SELECT permission on columnar metadata tables to public + + * Improves citus_update_table_statistics and provides distributed deadlock + detection + + * Preserves colocation with procedures in alter_distributed_table + + * Prevents using alter_columnar_table_set and alter_columnar_table_reset + on a columnar table not owned by the user + + * Removes limits around long table names + + -- Gurkan Indibay Thu, 4 Mar 2021 2:46:54 +0000 + +citus (9.5.2.citus-1) stable; urgency=low + + * Fixes distributed deadlock detection being blocked by metadata sync + + * Prevents segfaults when SAVEPOINT handling cannot recover from connection + failures + + * Fixes possible issues that might occur with single shard distributed tables + + -- gurkanindibay Wed, 27 Jan 2021 11:25:38 +0000 + +citus (9.4.4.citus-1) stable; urgency=low + + * Fixes a bug that could cause router queries with local tables to be pushed + down + + * Fixes a segfault in connection management due to invalid connection hash + entries + + * Fixes possible issues that might occur with single shard distributed tables + + -- gurkanindibay Tue, 5 Jan 2021 14:58:56 +0000 + +citus (9.5.1.citus-1) stable; urgency=low + + * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE + + * Fixes a bug that could cause excessive memory consumption when a partition is + created + + * Fixes a bug that triggers subplan executions unnecessarily with cursors + + * Fixes a segfault in connection management due to invalid connection hash + entries + + -- Onur Tirtir Wed, 2 Dec 2020 14:28:44 +0000 + +citus (9.4.3.citus-1) stable; urgency=low + + * Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE + + * Fixes a bug that triggers subplan executions unnecessarily with cursors + + -- Onur Tirtir Tue, 24 Nov 2020 11:17:57 +0000 + +citus (9.5.0.citus-1) stable; urgency=low + + * Adds 
support for PostgreSQL 13 + + * Removes the task-tracker executor + + * Introduces citus local tables + + * Introduces undistribute_table UDF to convert tables back to postgres tables + + * Adds support for EXPLAIN (ANALYZE) EXECUTE and EXPLAIN EXECUTE + + * Adds support for EXPLAIN (ANALYZE, WAL) for PG13 + + * Sorts the output of EXPLAIN (ANALYZE) by execution duration. + + * Adds support for CREATE TABLE ... USING table_access_method + + * Adds support for WITH TIES option in SELECT and INSERT SELECT queries + + * Avoids taking multi-shard locks on workers + + * Enforces citus.max_shared_pool_size config in COPY queries + + * Enables custom aggregates with multiple parameters to be executed on workers + + * Enforces citus.max_intermediate_result_size in local execution + + * Improves cost estimation of INSERT SELECT plans + + * Introduces delegation of procedures that read from reference tables + + * Prevents pull-push execution for simple pushdownable subqueries + + * Improves error message when creating a foreign key to a local table + + * Makes citus_prepare_pg_upgrade idempotent by dropping transition tables + + * Disallows ON TRUE outer joins with reference & distributed tables when + reference table is outer relation to avoid incorrect results + + * Disallows field indirection in INSERT/UPDATE queries to avoid incorrect + results + + * Disallows volatile functions in UPDATE subqueries to avoid incorrect results + + * Fixes CREATE INDEX CONCURRENTLY crash with local execution + + * Fixes citus_finish_pg_upgrade to drop all backup tables + + * Fixes a bug that cause failures when RECURSIVE VIEW joined reference table + + * Fixes DROP SEQUENCE failures when metadata syncing is enabled + + * Fixes a bug that caused CREATE TABLE with CHECK constraint to fail + + * Fixes a bug that could cause VACUUM to deadlock + + * Fixes master_update_node failure when no background worker slots are available + + * Fixes a bug that caused replica identity to not be propagated on shard repair + + * Fixes a bug that could cause crashes after connection timeouts + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug that could cause deadlocks on CREATE INDEX + + * Fixes a bug with genetic query optimization in outer joins + + * Fixes a crash when aggregating empty tables + + * Fixes a crash with inserting domain constrained composite types + + * Fixes a crash with multi-row & router INSERT's in local execution + + * Fixes a possibility of doing temporary file cleanup more than once + + * Fixes incorrect setting of join related fields + + * Fixes memory issues around deparsing index commands + + * Fixes reference table access tracking for sequential execution + + * Fixes removal of a single node with only reference tables + + * Fixes sending commands to coordinator when it is added as a worker + + * Fixes write queries with const expressions and COLLATE in various places + + * Fixes wrong cancellation message about distributed deadlock + + -- Onur Tirtir Wed, 11 Nov 2020 15:00:27 +0000 + +citus (9.4.2.citus-1) stable; urgency=low + + * Fixes a bug that could lead to multiple maintenance daemons + + * Fixes an issue preventing views in reference table modifications + + -- Onur Tirtir Thu, 22 Oct 2020 8:53:44 +0000 + +citus (9.4.1.citus-1) stable; urgency=low + + * Fixes EXPLAIN ANALYZE output truncation + + * Fixes a deadlock during transaction recovery + + -- Onur Tirtir Wed, 30 Sep 2020 9:33:46 +0000 + +citus (9.4.0.citus-1) stable; urgency=low + + * Improves COPY by 
honoring max_adaptive_executor_pool_size config + + * Adds support for insert into local table select from distributed table + + * Adds support to partially push down tdigest aggregates + + * Adds support for receiving binary encoded results from workers using + citus.enable_binary_protocol + + * Enables joins between local tables and CTEs + + * Adds showing query text in EXPLAIN output when explain verbose is true + + * Adds support for showing CTE statistics in EXPLAIN ANALYZE + + * Adds support for showing amount of data received in EXPLAIN ANALYZE + + * Introduces downgrade paths in migration scripts + + * Avoids returning incorrect results when changing roles in a transaction + + * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug + + * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug with lists of configuration values in ALTER ROLE SET statements + + * Fixes a bug that occurs when coordinator is added as a worker node + + * Fixes a crash because of overflow in partition id with certain compile flags + + * Fixes a crash that may happen if no worker nodes are added + + * Fixes a crash that occurs when inserting implicitly coerced constants + + * Fixes a crash when aggregating empty tables + + * Fixes a memory leak in subtransaction memory handling + + * Fixes crash when using rollback to savepoint after cancellation of DML + + * Fixes deparsing for queries with anonymous column references + + * Fixes distribution of composite types failing to include typemods + + * Fixes explain analyze on adaptive executor repartitions + + * Fixes possible error throwing in abort handle + + * Fixes segfault when evaluating func calls with default params on coordinator + + * Fixes several EXPLAIN ANALYZE issues + + * Fixes write queries with const expressions and COLLATE in various places + + * Fixes wrong cancellation message about distributed deadlocks + + * Reports correct INSERT/SELECT method in EXPLAIN + + * Disallows triggers on citus tables + + -- Onur Tirtir Tue, 28 Jul 2020 13:22:31 +0000 + +citus (9.3.5.citus-1) stable; urgency=low + + * Fixes ALTER TABLE IF EXISTS SET SCHEMA with non-existing table bug + + * Fixes CREATE INDEX CONCURRENTLY with no index name on a postgres table bug + + * Fixes a crash because of overflow in partition id with certain compile flags + + -- Onur Tirtir Mon, 27 Jul 2020 7:28:18 +0000 + +citus (9.3.4.citus-1) stable; urgency=low + + * Fixes a bug that could cause crashes with certain compile flags + + * Fixes a bug with lists of configuration values in ALTER ROLE SET statements + + * Fixes deparsing for queries with anonymous column references + + -- Onur Tirtir Wed, 22 Jul 2020 9:00:01 +0000 + +citus (9.3.3.citus-1) stable; urgency=low + + * Fixes a memory leak in subtransaction memory handling + + -- Onur Tirtir Mon, 13 Jul 2020 8:47:40 +0000 + +citus (9.3.0.citus-1) stable; urgency=low + + * Adds max_shared_pool_size to control number of connections across sessions + + * Adds support for window functions on coordinator + + * Improves shard pruning logic to understand OR-conditions + + * Prevents using an extra connection for intermediate result multi-casts + + * Adds propagation of ALTER ROLE .. 
SET statements + + * Adds update_distributed_table_colocation UDF to update colocation of tables + + * Introduces a UDF to truncate local data after distributing a table + + * Adds support for creating temp schemas in parallel + + * Adds support for evaluation of nextval in the target list on coordinator + + * Adds support for local execution of COPY/TRUNCATE/DROP/DDL commands + + * Adds support for local execution of shard creation + + * Uses local execution in a transaction block + + * Adds support for querying distributed table sizes concurrently + + * Allows master_copy_shard_placement to replicate placements to new nodes + + * Allows table type to be used in target list + + * Avoids having multiple maintenance daemons active for a single database + + * Defers reference table replication to shard creation time + + * Enables joins between local tables and reference tables in transaction blocks + + * Ignores pruned target list entries in coordinator plan + + * Improves SIGTERM handling of maintenance daemon + + * Increases the default of citus.node_connection_timeout to 30 seconds + + * Fixes a bug that occurs when creating remote tasks in local execution + + * Fixes a bug that causes some DML queries containing aggregates to fail + + * Fixes a bug that could cause failures in queries with subqueries or CTEs + + * Fixes a bug that may cause some connection failures to throw errors + + * Fixes a bug which caused queries with SRFs and function evaluation to fail + + * Fixes a bug with generated columns when executing COPY dist_table TO file + + * Fixes a crash when using non-constant limit clauses + + * Fixes a failure when composite types used in prepared statements + + * Fixes a possible segfault when dropping dist. table in a transaction block + + * Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING + + * Fixes a segfault when executing queries using GROUPING + + * Fixes an error when using LEFT JOIN with GROUP BY on primary key + + * Fixes an issue with distributing tables having generated cols not at the end + + * Fixes automatic SSL permission issue when using "initdb --allow-group-access" + + * Fixes errors which could occur when subqueries are parameters to aggregates + + * Fixes possible issues by invalidating the plan cache in master_update_node + + * Fixes timing issues which could be caused by changing system clock + + -- Onur Tirtir Thu, 7 May 2020 15:11:25 +0000 + +citus (9.2.4.citus-1) stable; urgency=low + + * Fixes a release problem in 9.2.3 + + -- Onur Tirtir Tue, 31 Mar 2020 08:06:59 +0000 + +citus (9.2.3.citus-1) stable; urgency=low + + * Do not use C functions that have been banned by Microsoft + + * Fixes a bug that causes wrong results with complex outer joins + + * Fixes issues found using static analysis + + * Fixes left join shard pruning in pushdown planner + + * Fixes possibility for segmentation fault in internal aggregate functions + + * Fixes possible segfault when non pushdownable aggregates are used in HAVING + + * Improves correctness of planning subqueries in HAVING + + * Prevents using old connections for security if citus.node_conninfo changed + + * Uses Microsoft approved cipher string for default TLS setup + + -- Onur Tirtir Thu, 26 Mar 2020 8:22:48 +0000 + +citus (9.0.2.citus-1) stable; urgency=low + + * Fixes build errors on EL/OL 6 based distros + + * Fixes a bug that caused maintenance daemon to fail on standby nodes + + * Disallows distributed function creation when replication_model is `statement` + + -- Onur Tirtir 
Fri, 6 Mar 2020 14:10:16 +0000 + +citus (9.2.2.citus-1) stable; urgency=low + + * Fixes a bug that caused some prepared stmts with function calls to fail + + * Fixes a bug that caused some prepared stmts with composite types to fail + + * Fixes a bug that caused missing subplan results in workers + + * Improves performance of re-partition joins + + -- Onur Tirtir Fri, 6 Mar 2020 07:14:20 +0000 + +citus (9.2.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause crashes if distribution key is NULL + + -- Onur Tirtir Fri, 14 Feb 2020 11:51:09 +0000 + +citus (9.2.0.citus-1) stable; urgency=low + + * Adds support for INSERT...SELECT queries with re-partitioning + + * Adds citus.coordinator_aggregation_strategy to support more aggregates + + * Adds caching of local plans on shards for Citus MX + + * Adds compatibility support for dist. object infrastructure from old versions + + * Adds deferring shard-pruning for fast-path router queries to execution + + * Adds propagation of GRANT ... ON SCHEMA queries + + * Adds support for CTE pushdown via CTE inlining in distributed planning + + * Adds support for ALTER TABLE ... SET SCHEMA propagation. + + * Adds support for DROP ROUTINE & ALTER ROUTINE commands + + * Adds support for any inner join on a reference table + + * Changes citus.log_remote_commands level to NOTICE + + * Disallows marking ref. table shards unhealthy in the presence of savepoints + + * Disallows placing new shards with shards in TO_DELETE state + + * Enables local execution of queries that do not need any data access + + * Fixes Makefile trying to cleanup PG directory during install + + * Fixes a bug causing errors when planning a query with multiple subqueries + + * Fixes a possible deadlock that could happen during shard moves + + * Fixes a problem when adding a new node due to tables referenced in func body + + * Fixes an issue that could cause joins with reference tables to be slow + + * Fixes cached metadata for shard is inconsistent issue + + * Fixes inserting multiple composite types as partition key in VALUES + + * Fixes unnecessary repartition on joins with more than 4 tables + + * Prevents wrong results for replicated partitioned tables after failure + + * Restricts LIMIT approximation for non-commutative aggregates + + -- Onur Tirtir Wed, 10 Feb 2020 8:48:00 +0000 + +citus (9.1.1.citus-1) stable; urgency=low + + * Fixes a bug causing SQL-executing UDFs to crash when passing in DDL + + * Fixes a bug that caused column_to_column_name to crash for invalid input + + * Fixes a bug that caused inserts into local tables w/ dist. 
subqueries to crash + + * Fixes a bug that caused some noop DML statements to fail + + * Fixes a bug that prevents dropping reference table columns + + * Fixes a crash in IN (.., NULL) queries + + * Fixes a crash when calling a distributed function from PL/pgSQL + + * Fixes an issue that caused CTEs to sometimes leak connections + + * Fixes strange errors in DML with unreachable sublinks + + * Prevents statements in SQL functions to run outside of a transaction + + -- Onur Tirtir Wed, 18 Dec 2019 14:32:42 +0000 + +citus (9.1.0.citus-1) stable; urgency=low + + * Adds extensions to distributed object propagation infrastructure + + * Adds support for ALTER ROLE propagation + + * Adds support for aggregates in create_distributed_function + + * Adds support for expressions in reference joins + + * Adds support for returning RECORD in multi-shard queries + + * Adds support for simple IN subqueries on unique cols in repartition joins + + * Adds support for subqueries in HAVING clauses + + * Automatically distributes unary aggs w/ combinefunc and non-internal stype + + * Disallows distributed func creation when replication_model is 'statement' + + * Drops support for deprecated real-time and router executors + + * Fixes a bug in local execution that could cause missing rows in RETURNING + + * Fixes a bug that caused maintenance daemon to fail on standby nodes + + * Fixes a bug that caused other CREATE EXTENSION commands to take longer + + * Fixes a bug that prevented REFRESH MATERIALIZED VIEW + + * Fixes a bug when view is used in modify statements + + * Fixes a memory leak in adaptive executor when query returns many columns + + * Fixes underflow init of default values in worker extended op node creation + + * Fixes potential segfault in standard_planner inlining functions + + * Fixes an issue that caused failures in RHEL 6 builds + + * Fixes queries with repartition joins and group by unique column + + * Improves CTE/Subquery performance by pruning intermediate rslt broadcasting + + * Removes citus.worker_list_file GUC + + * Revokes usage from the citus schema from public + + -- Onur Tirtir Thu, 28 Nov 2019 15:11:05 +0000 + +citus (9.0.1.citus-1) stable; urgency=low + + * Fixes a memory leak in the executor + + * Revokes usage from the citus schema from public + + -- Hanefi Onaldi Wed, 30 Oct 2019 8:53:22 +0000 + +citus (9.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 12 + + * Adds UDFs to help with PostgreSQL upgrades + + * Distributes types to worker nodes + + * Introduces create_distributed_function UDF + + * Introduces local query execution for Citus MX + + * Implements infrastructure for routing CALL to MX workers + + * Implements infrastructure for routing SELECT function() to MX workers + + * Adds support for foreign key constraints between reference tables + + * Adds a feature flag to turn off CREATE TYPE propagation + + * Adds option citus.single_shard_commit_protocol + + * Adds support for EXPLAIN SUMMARY + + * Adds support for GENERATE ALWAYS AS STORED + + * Adds support for serial and smallserial in MX mode + + * Adds support for anon composite types on the target list in router queries + + * Avoids race condition between create_reference_table & master_add_node + + * Fixes a bug in schemas of distributed sequence definitions + + * Fixes a bug that caused run_command_on_colocated_placements to fail + + * Fixes a bug that leads to various issues when a connection is lost + + * Fixes a schema leak on CREATE INDEX statement + + * Fixes assert failure in bare SELECT 
FROM reference table FOR UPDATE in MX + + * Makes master_update_node MX compatible + + * Prevents pg_dist_colocation from multiple records for reference tables + + * Prevents segfault in worker_partition_protocol edgecase + + * Propagates ALTER FUNCTION statements for distributed functions + + * Propagates CREATE OR REPLACE FUNCTION for distributed functions + + * Propagates REINDEX on tables & indexes + + * Provides a GUC to turn of the new dependency propagation functionality + + * Uses 2PC in adaptive executor when dealing with replication factors above 1 + + -- Hanefi Onaldi Tue, 15 Oct 2019 16:54:50 +0000 + +citus (8.3.2.citus-1) stable; urgency=low + + * Fixes performance issues by skipping unnecessary relation access recordings + + -- Hanefi Onaldi Fri, 9 Aug 2019 11:15:57 +0000 + +citus (8.3.1.citus-1) stable; urgency=low + + * Improves Adaptive Executor performance + + -- Hanefi Onaldi Mon, 29 Jul 2019 10:25:50 +0000 + +citus (8.3.0.citus-1) stable; urgency=low + + * Adds a new distributed executor: Adaptive Executor + + * citus.enable_statistics_collection defaults to off (opt-in) + + * Adds support for CTEs in router planner for modification queries + + * Adds support for propagating SET LOCAL at xact start + + * Adds option to force master_update_node during failover + + * Deprecates master_modify_multiple_shards + + * Improves round robin logic on router queries + + * Creates all distributed schemas as superuser on a separate connection + + * Makes COPY adapt to connection use behaviour of previous commands + + * Replaces SESSION_LIFESPAN with configurable no. of connections at xact end + + * Propagates ALTER FOREIGN TABLE commands to workers + + * Don't schedule tasks on inactive nodes + + * Makes DROP/VALIDATE CONSTRAINT tolerant of ambiguous shard extension + + * Fixes an issue with subquery map merge jobs as non-root + + * Fixes null pointers caused by partial initialization of ConnParamsHashEntry + + * Fixes errors caused by joins with shadowed aliases + + * Fixes a regression in outer joining subqueries introduced in 8.2.0 + + * Fixes a crash that can occur under high memory load + + * Fixes a bug that selects wrong worker when using round-robin assignment + + * Fixes savepoint rollback after multi-shard modify/copy failure + + * Fixes bad foreign constraint name search + + * Fixes a bug that prevents stack size to be adjusted + + -- Hanefi Onaldi Wed, 10 Jul 2019 15:19:02 +0000 + +citus (8.2.2.citus-1) stable; urgency=low + + * Fixes a bug in outer joins wrapped in subqueries + + -- Burak Velioglu Wed, 12 Jun 2019 8:45:08 +0000 + +citus (8.2.1.citus-1) stable; urgency=low + + * Fixes a bug that prevents stack size to be adjusted + + -- Burak Velioglu Wed, 3 Apr 2019 20:56:47 +0000 + +citus (8.1.2.citus-1) stable; urgency=low + + * Don't do redundant ALTER TABLE consistency checks at coordinator + + * Fixes a bug that prevents stack size to be adjusted + + * Fix an issue with some DECLARE .. 
CURSOR WITH HOLD commands + + -- Burak Velioglu Wed, 3 Apr 2019 20:34:46 +0000 + +citus (8.2.0.citus-1) stable; urgency=low + + * Removes support and code for PostgreSQL 9.6 + + * Enable more outer joins with reference tables + + * Execute CREATE INDEX CONCURRENTLY in parallel + + * Treat functions as transaction blocks + + * Add support for column aliases on join clauses + + * Skip standard_planner() for trivial queries + + * Added support for function calls in joins + + * Round-robin task assignment policy relies on local transaction id + + * Relax subquery union pushdown restrictions for reference tables + + * Speed-up run_command_on_shards() + + * Address some memory issues in connection config + + * Restrict visibility of get_*_active_transactions functions to pg_monitor + + * Don't do redundant ALTER TABLE consistency checks at coordinator + + * Queries with only intermediate results do not rely on task assignment policy + + * Finish connection establishment in parallel for multiple connections + + * Fixes a bug related to pruning shards using a coerced value + + * Fix an issue with some DECLARE .. CURSOR WITH HOLD commands + + * Fixes a bug that could lead to infinite recursion during recursive planning + + * Fixes a bug that could prevent planning full outer joins with using clause + + * Fixes a bug that could lead to memory leak on citus_relation_size + + * Fixes a problem that could cause segmentation fault with recursive planning + + * Switch CI solution to CircleCI + + -- Burak Velioglu Fri, 29 Mar 2019 07:36:09 +0000 + +citus (8.0.3.citus-1) stable; urgency=low + + * Fixes maintenance daemon panic due to unreleased spinlock + + * Fixes an issue with having clause when used with complex joins + + -- Hanefi Onaldi Wed, 9 Jan 2019 9:50:07 +0000 + +citus (8.1.1.citus-1) stable; urgency=low + + * Fixes maintenance daemon panic due to unreleased spinlock + + * Fixes an issue with having clause when used with complex joins + + -- Hanefi Onaldi Mon, 7 Jan 2019 16:26:13 +0000 + +citus (8.1.0.citus-1) stable; urgency=low + + * Turns on ssl by default for new installations of citus + + * Restricts SSL Ciphers to TLS1.2 and above + + * Adds support for INSERT INTO SELECT..ON CONFLICT/RETURNING via coordinator + + * Adds support for round-robin task assignment for queries to reference tables + + * Adds support for SQL tasks using worker_execute_sql_task UDF with task-tracker + + * Adds support for VALIDATE CONSTRAINT queries + + * Adds support for disabling hash aggregate with HLL + + * Adds user ID suffix to intermediate files generated by task-tracker + + * Only allow transmit from pgsql_job_cache directory + + * Disallows GROUPING SET clauses in subqueries + + * Removes restriction on user-defined group ID in node addition functions + + * Relaxes multi-shard modify locks when enable_deadlock_prevention is disabled + + * Improves security in task-tracker protocol + + * Improves permission checks in internal DROP TABLE functions + + * Improves permission checks in cluster management functions + + * Cleans up UDFs and fixes permission checks + + * Fixes crashes caused by stack size increase under high memory load + + * Fixes a bug that could cause maintenance daemon panic + + -- Burak Velioglu Tue, 18 Dec 2018 15:12:45 +0000 + +citus (8.0.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause maintenance daemon panic + + * Fixes crashes caused by stack size increase under high memory load + + -- Burak Velioglu Thu, 13 Dec 2018 13:56:44 +0000 + +citus (7.5.4.citus-1) stable; 
urgency=low + + * Fixes a bug that could cause maintenance daemon panic + + -- Burak Velioglu Wed, 12 Dec 2018 11:45:24 +0000 + +citus (8.0.1.citus-1) stable; urgency=low + + * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker + + -- Burak Velioglu Wed, 28 Nov 2018 11:38:47 +0000 + +citus (7.5.3.citus-1) stable; urgency=low + + * Execute SQL tasks using worker_execute_sql_task UDF when using task-tracker + + -- Burak Velioglu Wed, 28 Nov 2018 10:52:20 +0000 + +citus (7.5.2.citus-1) stable; urgency=low + + * Fixes inconsistent metadata error when shard metadata caching get interrupted + + * Fixes a bug that could cause memory leak + + * Fixes a bug that prevents recovering wrong transactions in MX + + * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load + + * Fixes crashes caused by stack size increase under high memory load + + -- Burak Velioglu Wed, 14 Nov 2018 20:42:16 +0000 + +citus (8.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 11 + + * Adds support for applying DML operations on reference tables from MX nodes + + * Adds distributed locking to truncated MX tables + + * Adds support for running TRUNCATE command from MX worker nodes + + * Adds views to provide insight about the distributed transactions + + * Adds support for TABLESAMPLE in router queries + + * Adds support for INCLUDE option in index creation + + * Adds option to allow simple DML commands from hot standby + + * Adds support for partitioned tables with replication factor > 1 + + * Prevents a deadlock on concurrent DROP TABLE and SELECT on Citus MX + + * Fixes a bug that prevents recovering wrong transactions in MX + + * Fixes a bug to prevent wrong memory accesses on Citus MX under very high load + + * Fixes a bug in MX mode, calling DROP SCHEMA with existing partitioned table + + * Fixes a bug that could cause modifying CTEs to select wrong execution mode + + * Fixes a bug preventing rollback in CREATE PROCEDURE + + * Fixes a bug on not being able to drop index on a partitioned table + + * Fixes a bug on TRUNCATE when there is a foreign key to a reference table + + * Fixes a performance issue in prepared INSERT..SELECT + + * Fixes a bug which causes errors on DROP DATABASE IF EXISTS + + * Fixes a bug to remove intermediate result directory in pull-push execution + + * Improves query pushdown planning performance + + * Evaluate functions anywhere in query + + -- Burak Velioglu Fri, 02 Nov 2018 08:06:42 +0000 + +citus (7.5.1.citus-1) stable; urgency=low + + * Improves query pushdown planning performance + + * Fixes a bug that could cause modifying CTEs to select wrong execution mode + + -- Burak Velioglu Wed, 29 Aug 2018 08:06:42 +0000 + +citus (7.4.2.citus-1) stable; urgency=low + + * Fixes a segfault in real-time executor during online shard move + + -- Mehmet Furkan Sahin Fri, 27 Jul 2018 13:42:27 +0000 + +citus (7.5.0.citus-1) stable; urgency=low + + * Adds foreign key support from hash distributed to reference tables + + * Adds SELECT ... FOR UPDATE support for router plannable queries + + * Adds support for non-partition columns in count distinct + + * Fixes a segfault in real-time executor during online shard move + + * Fixes ALTER TABLE ADD COLUMN constraint check + + * Fixes a bug where INSERT ... SELECT allows one to update dist. 
column + + * Allows DDL commands to be sequentialized via citus.multi_shard_modify_mode + + * Adds support for topn_union_agg and topn_add_agg across shards + + * Adds support for hll_union_agg and hll_add_agg across shards + + * Fixes a bug that might cause shards to have a wrong owner + + * Adds select_opens_transaction_block GUC + + * Adds utils to implement DDLs for policies in future + + * Makes intermediate results to use separate connections + + * Adds a node_conninfo GUC to set outgoing connection settings + + -- Mehmet Furkan Sahin Wed, 25 Jul 2018 9:32:24 +0000 + +citus (6.2.6.citus-1) stable; urgency=low + + * Adds support for respecting enable_hashagg in the master planner + + -- Burak Velioglu Fri, 06 Jul 2018 13:30:08 +0000 + +citus (7.4.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause txns to incorrectly proceed after failure + + * Fixes a bug on INSERT ... SELECT queries in prepared statements + + -- Burak Velioglu Wed, 20 Jun 2018 12:25:30 +0000 + +citus (7.2.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause SELECTs to crash during a rebalance + + -- Burak Velioglu Thu, 17 May 2018 11:51:56 +0000 + +citus (7.4.0.citus-1) stable; urgency=low + + * Adds support for non-pushdownable subqueries and CTEs in UPDATE/DELETE + + * Adds support for pushdownable subqueries and joins in UPDATE/DELETE + + * Adds faster shard pruning for subqueries + + * Adds partitioning support to MX table + + * Adds support for (VACUUM | ANALYZE) VERBOSE + + * Adds support for multiple ANDs in HAVING for pushdown planner + + * Adds support for quotation needy schema names + + * Improves operator check time in physical planner for custom data types + + * Removes broadcast join logic + + * Deprecates large_table_shard_count and master_expire_table_cache() + + * Modifies master_update_node to write-lock shards hosted by node over update + + * DROP TABLE now drops shards as the current user instead of the superuser + + * Adds specialised error codes for connection failures + + * Improves error messages on connection failure + + * Fixes issue which prevented multiple citus_table_size calls per query + + * Tests are updated to use create_distributed_table + + -- Burak Velioglu Tue, 15 May 2018 13:01:17 +0000 + +citus (7.3.0.citus-1) stable; urgency=low + + * Adds support for non-colocated joins between subqueries + + * Adds support for window functions that can be pushed down to worker + + * Adds support for modifying CTEs + + * Adds recursive plan for WHERE clause subqueries with recurring FROM clause + + * Adds support for bool_ and bit_ aggregates + + * Adds support for Postgres jsonb and json aggregation functions + + * Adds support for respecting enable_hashagg in the master plan + + * Performance improvements to reduce distributed planning time + + * Fixes a bug on planner when aggregate is used in ORDER BY + + * Fixes a bug on planner when DISTINCT (ON) clause is used with GROUP BY + + * Fixes a planner bug with distinct and aggregate clauses + + * Fixes a bug that opened new connections on each table size function call + + * Fixes a bug canceling backends not involved in distributed deadlocks + + * Fixes count distinct bug on column expressions when used with subqueries + + * Improves error handling on worker node failures + + * Improves error messages for INSERT queries that have subqueries + + -- Burak Velioglu Thu, 15 Mar 2018 14:16:10 +0000 + +citus (7.2.1.citus-1) stable; urgency=low + + * Fixes count distinct bug on column expressions when used with 
subqueries + + * Adds support for respecting enable_hashagg in the master plan + + * Fixes a bug canceling backends not involved in distributed deadlocks + + -- Burak Velioglu Tue, 06 Feb 2018 14:46:07 +0000 + +citus (7.2.0.citus-1) stable; urgency=low + + * Adds support for CTEs + + * Adds support for subqueries that require merge step + + * Adds support for set operations (UNION, INTERSECT, ...) + + * Adds support for 2PC auto-recovery + + * Adds support for querying local tables in CTEs and subqueries + + * Adds support for more SQL coverage in subqueries for reference tables + + * Adds support for count(distinct) in queries with a subquery + + * Adds support for non-equijoins when there is already an equijoin + + * Adds support for real-time executor to run in transaction blocks + + * Adds infrastructure for storing intermediate distributed query results + + * Adds a new GUC named enable_repartition_joins for auto executor switch + + * Adds support for limiting the intermediate result size + + * Improves support for queries with unions containing filters + + * Improves support for queries with unions containing joins + + * Improves support for subqueries in the WHERE clause + + * Increases COPY throughput + + * Enables pushing down queries containing only recurring tuples and GROUP BY + + * Load-balance queries that read from 0 shards + + * Improves support for using functions in subqueries + + * Fixes a bug causing real-time executor to crash during cancellation + + * Fixes a bug causing real-time executor to get stuck on cancellation + + * Fixes a bug that could block modification queries unnecessarily + + * Fixes a bug that could cause assigning wrong IDs to transactions + + * Fixes a bug that could cause an assert failure with ANALYZE statements + + * Fixes a bug that would push down wrong set operations in subqueries + + * Fixes a bug that could cause a deadlock in create_distributed_table + + * Fixes a bug that could confuse user about ANALYZE usage + + * Fixes a bug causing false positive distributed deadlock detections + + * Relaxes the locking for DDL commands on partitioned tables + + * Relaxes the locking on COPY with replication + + * Logs more remote commands when citus.log_remote_commands is set + + -- Burak Velioglu Tue, 16 Jan 2018 14:34:20 +0000 + +citus (6.2.5.citus-1) stable; urgency=low + + * Fixes a bug that could crash the coordinator while reporting a remote error + + -- Burak Velioglu Thu, 11 Jan 2018 11:40:28 +0000 + +citus (7.1.2.citus-1) stable; urgency=low + + * Fixes a bug that could cause assigning wrong IDs to transactions + + * Increases COPY throughput + + -- Burak Velioglu Fri, 05 Jan 2018 09:00:07 +0000 + +citus (7.1.1.citus-1) stable; urgency=low + + * Fixes a bug preventing pushing down subqueries with reference tables + + * Fixes a bug that could create false positive distributed deadlocks + + * Fixes a bug that could prevent running concurrent COPY and multi-shard DDL + + * Fixes a bug that could mislead users about ANALYZE queries + + -- Burak Velioglu Tue, 05 Dec 2017 09:00:07 +0000 + +citus (7.1.0.citus-1) stable; urgency=low + + * Adds support for native queries with multi shard UPDATE/DELETE queries + + * Expands reference table support in subquery pushdown + + * Adds window function support for subqueries and INSERT ... 
SELECT queries + + * Adds support for COUNT(DISTINCT) [ON] queries on non-partition columns + + * Adds support for DISTINCT [ON] queries on non-partition columns + + * Introduces basic usage statistic collector + + * Adds support for setting replica identity while creating distributed tables + + * Adds support for ALTER TABLE ... REPLICA IDENTITY queries + + * Adds pushdown support for LIMIT and HAVING grouped by partition key + + * Adds support for INSERT ... SELECT queries via worker nodes on MX clusters + + * Adds support for adding primary key using already defined index + + * Adds replication parameter to shard copy functions + + * Changes shard_name UDF to omit public schema name + + * Adds master_move_node UDF to make changes on nodename/nodeport more easy + + * Fixes a bug that could cause casting error with INSERT ... SELECT queries + + * Fixes a bug that could prevent upgrading servers from Citus 6.1 + + * Fixes a bug that could prevent attaching partitions to a table in schema + + * Fixes a bug preventing adding nodes to clusters with reference tables + + * Fixes a bug that could cause a crash with INSERT ... SELECT queries + + * Fixes a bug that could prevent creating a partitioned table on Cloud + + * Implements various performance improvements + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Velioglu Wed, 15 Nov 2017 09:00:07 +0000 + +citus (7.0.3.citus-1) stable; urgency=low + + * Fixes several bugs that could cause crashes + + * Fixes a bug that could cause deadlock while creating reference tables + + * Fixes a bug that could cause false-positives in deadlock detection + + * Fixes a bug that could cause 2PC recovery not to work from MX workers + + * Fixes a bug that could cause cache incoherency + + * Fixes a bug that could cause maintenance daemon to skip cache invalidations + + * Improves performance of transaction recovery by using correct index + + -- Burak Yucesoy Mon, 16 Oct 2017 11:52:07 +0000 + +citus (7.0.2.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 22:29:01 +0000 + +citus (6.2.4.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 21:31:35 +0000 + +citus (6.1.3.citus-1) stable; urgency=low + + * Updates task-tracker to limit file access + + -- Burak Yucesoy Thu, 28 Sep 2017 20:31:35 +0000 + +citus (7.0.1.citus-1) stable; urgency=low + + * Fixes a bug that could cause memory leaks in INSERT ... SELECT queries + + * Fixes a bug that could cause incorrect execution of prepared statements + + * Fixes a bug that could cause excessive memory usage during COPY + + * Incorporates latest changes from core PostgreSQL code + + -- Burak Yucesoy Tue, 12 Sep 2017 17:53:50 +0000 + +citus (7.0.0.citus-1) stable; urgency=low + + * Adds support for PostgreSQL 10 + + * Drops support for PostgreSQL 9.5 + + * Adds support for multi-row INSERT + + * Adds support for router UPDATE and DELETE queries with subqueries + + * Adds infrastructure for distributed deadlock detection + + * Deprecates enable_deadlock_prevention flag + + * Adds support for partitioned tables + + * Adds support for creating UNLOGGED tables + + * Adds support for SAVEPOINT + + * Adds UDF citus_create_restore_point for taking distributed snapshots + + * Adds support for evaluating non-pushable INSERT ... 
SELECT queries + + * Adds support for subquery pushdown on reference tables + + * Adds shard pruning support for IN and ANY + + * Adds support for UPDATE and DELETE commands that prune down to 0 shard + + * Enhances transaction support by relaxing some transaction restrictions + + * Fixes a bug causing crash if distributed table has no shards + + * Fixes a bug causing crash when removing inactive node + + * Fixes a bug causing failure during COPY on tables with dropped columns + + * Fixes a bug causing failure during DROP EXTENSION + + * Fixes a bug preventing executing VACUUM and INSERT concurrently + + * Fixes a bug in prepared INSERT statements containing an implicit cast + + * Fixes several issues related to statement cancellations and connections + + * Fixes several 2PC related issues + + * Removes an unnecessary dependency causing warning messages in pg_dump + + * Adds internal infrastructure for follower clusters + + * Adds internal infrastructure for progress tracking + + * Implements various performance improvements + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Mon, 28 Aug 2017 12:27:53 +0000 + +citus (6.2.3.citus-1) stable; urgency=low + + * Fixes a crash during execution of local CREATE INDEX CONCURRENTLY + + * Fixes a bug preventing usage of quoted column names in COPY + + * Fixes a bug in prepared INSERTs with implicit cast in partition column + + * Relaxes locks in VACUUM to ensure concurrent execution with INSERT + + -- Burak Yucesoy Thu, 13 Jul 2017 11:27:17 +0000 + +citus (6.2.2.citus-1) stable; urgency=low + + * Fixes a common cause of deadlocks when repairing tables with foreign keys + + -- Burak Velioglu Wed, 07 Jun 2017 09:42:17 +0000 + +citus (6.1.2.citus-1) stable; urgency=low + + * Fixes a common cause of deadlocks when repairing tables with foreign keys + + -- Jason Petersen Wed, 31 May 2017 16:14:11 +0000 + +citus (6.2.1.citus-1) stable; urgency=low + + * Relaxes version-check logic to avoid breaking non-distributed commands + + -- Jason Petersen Wed, 24 May 2017 22:36:07 +0000 + +citus (6.2.0.citus-1) stable; urgency=low + + * Increases SQL subquery coverage by pushing down more kinds of queries + + * Adds CustomScan API support to allow read-only transactions + + * Adds support for CREATE/DROP INDEX CONCURRENTLY + + * Adds support for ALTER TABLE ... ADD CONSTRAINT + + * Adds support for ALTER TABLE ... 
RENAME COLUMN + + * Adds support for DISABLE/ENABLE TRIGGER ALL + + * Adds support for expressions in the partition column in INSERTs + + * Adds support for query parameters in combination with function evaluation + + * Adds support for creating distributed tables from non-empty local tables + + * Adds UDFs to get size of distributed tables + + * Adds UDFs to add a new node without replicating reference tables + + * Adds checks to prevent running Citus binaries with wrong metadata tables + + * Improves shard pruning performance for range queries + + * Improves planner performance for joins involving co-located tables + + * Improves shard copy performance by creating indexes after copy + + * Improves task-tracker performance by batching several status checks + + * Enables router planner for queries on range partitioned table + + * Changes TRUNCATE to drop local data only if enable_ddl_propagation is off + + * Starts to execute DDL on coordinator before workers + + * Fixes a bug causing incorrectly reading invalidated cache + + * Fixes a bug related to creation of schemas in workers with incorrect owner + + * Fixes a bug related to concurrent run of shard drop functions + + * Fixes a bug related to EXPLAIN ANALYZE with DML queries + + * Fixes a bug related to SQL functions in FROM clause + + * Adds a GUC variable to report cross shard queries + + * Fixes a bug related to partition columns without native hash function + + * Adds internal infrastructures and tests to improve development process + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Tue, 16 May 2017 16:05:22 +0000 + +citus (6.1.1.citus-1) stable; urgency=low + + * Fixes a crash caused by router executor use after connection timeouts + + * Fixes a crash caused by relation cache invalidation during COPY + + * Fixes bug related to DDL use within PL/pgSQL functions + + * Fixes a COPY bug related to types lacking binary output functions + + * Fixes a bug related to modifications with parameterized partition values + + * Fixes improper value interpolation in worker sequence generation + + * Guards shard pruning logic against zero-shard tables + + * Fixes possible NULL pointer dereference and buffer underflow, via PVS-Studio + + * Fixes a INSERT...SELECT bug that could push down non-partition column JOINs + + -- Metin Doslu Fri, 5 May 2017 17:42:00 +0000 + +citus (6.1.0.citus-1) stable; urgency=low + + * Implements reference tables, transactionally replicated to all nodes + + * Adds upgrade_to_reference_table UDF to upgrade pre-6.1 reference tables + + * Expands prepared statement support to nearly all statements + + * Adds support for creating VIEWs which reference distributed tables + + * Adds targeted VACUUM/ANALYZE support + + * Adds support for the FILTER clause in aggregate expressions + + * Adds support for function evaluation within INSERT INTO ... 
SELECT + + * Adds support for creating foreign key constraints with ALTER TABLE + + * Adds logic to choose router planner for all queries it supports + + * Enhances create_distributed_table with parameter for explicit colocation + + * Adds generally useful utility UDFs previously available as "Citus Tools" + + * Adds user-facing UDFs for locking shard resources and metadata + + * Refactors connection and transaction management for more consistency + + * Enhances COPY with fully transactional semantics + + * Improves support for cancellation for a number of queries and commands + + * Adds column_to_column_name UDF to help users understand partkey values + + * Adds master_disable_node UDF for temporarily disabling nodes + + * Adds proper MX ("masterless") metadata propagation logic + + * Adds start_metadata_sync_to_node UDF to propagate metadata changes to nodes + + * Enhances SERIAL compatibility with MX tables + + * Adds a node_connection_timeout parameter to set node connection timeouts + + * Adds enable_deadlock_prevention setting to permit multi-node transactions + + * Adds a replication_model setting to specify replication of new tables + + * Changes the shard_replication_factor setting's default value to one + + * Adds code to automatically set max_prepared_transactions if not configured + + * Accelerates lookup of colocated shard placements + + * Fixes a bug affecting INSERT INTO ... SELECT queries using constant values + + * Fixes a bug by ensuring COPY does not mark placements inactive + + * Fixes a bug affecting reads from pg_dist_shard_placement table + + * Fixes a crash triggered by creating a foreign key without a column + + * Fixes a crash related to accessing catalog tables after aborted transaction + + * Fixes a bug affecting JOIN queries requiring repartitions + + * Fixes a bug affecting node insertions to pg_dist_node table + + * Fixes a crash triggered by queries with modifying common table expressions + + * Fixes a bug affecting workloads with concurrent shard appends and deletions + + * Addresses various race conditions and deadlocks + + * Improves and standardizes error messages + + -- Burak Yucesoy Thu, 9 Feb 2017 16:17:41 +0000 + +citus (6.0.1.citus-3) stable; urgency=low + + * First build using new versioning practices + + -- Jason Petersen Wed, 8 Feb 2017 23:19:46 +0000 + +citus (6.0.1.citus-2) stable; urgency=low + + * Transitional package to guide users to new package name + + -- Jason Petersen Mon, 6 Feb 2017 16:33:44 +0000 + +citus (6.0.1.citus-1) stable; urgency=low + + * Fixes a bug causing failures during pg_upgrade + + * Fixes a bug preventing DML queries during colocated table creation + + * Fixes a bug that caused NULL parameters to be incorrectly passed as text + + -- Burak Yucesoy Wed, 30 Nov 2016 15:27:38 +0000 + +citus (6.0.0.citus-1) stable; urgency=low + + * Adds compatibility with PostgreSQL 9.6, now the recommended version + + * Removes the pg_worker_list.conf file in favor of a pg_dist_node table + + * Adds master_add_node and master_remove_node UDFs to manage membership + + * Removes the \stage command and corresponding csql binary in favor of COPY + + * Removes copy_to_distributed_table in favor of first-class COPY support + + * Adds support for multiple DDL statements within a transaction + + * Adds support for certain foreign key constraints + + * Adds support for parallel INSERT INTO ... 
SELECT against colocated tables + + * Adds support for the TRUNCATE command + + * Adds support for HAVING clauses in SELECT queries + + * Adds support for EXCLUDE constraints which include the partition column + + * Adds support for system columns in queries (tableoid, ctid, etc.) + + * Adds support for relation name extension within INDEX definitions + + * Adds support for no-op UPDATEs of the partition column + + * Adds several general-purpose utility UDFs to aid in Citus maintenance + + * Adds master_expire_table_cache UDF to forcibly expire cached shards + + * Parallelizes the processing of DDL commands which affect distributed tables + + * Adds support for repartition jobs using composite or custom types + + * Enhances object name extension to handle long names and large shard counts + + * Parallelizes the master_modify_multiple_shards UDF + + * Changes distributed table creation to error if target table is not empty + + * Changes the pg_dist_shard.logicalrelid column from an oid to regclass + + * Adds a placementid column to pg_dist_shard_placement, replacing Oid use + + * Removes the pg_dist_shard.shardalias distribution metadata column + + * Adds pg_dist_partition.repmodel to track tables using streaming replication + + * Adds internal infrastructure to take snapshots of distribution metadata + + * Addresses the need to invalidate prepared statements on metadata changes + + * Adds a mark_tables_colocated UDF for denoting pre-6.0 manual colocation + + * Fixes a bug affecting prepared statement execution within PL/pgSQL + + * Fixes a bug affecting COPY commands using composite types + + * Fixes a bug that could cause crashes during EXPLAIN EXECUTE + + * Separates worker and master job temporary folders + + * Eliminates race condition between distributed modification and repair + + * Relaxes the requirement that shard repairs also repair colocated shards + + * Implements internal functions to track which tables' shards are colocated + + * Adds pg_dist_partition.colocationid to track colocation group membership + + * Extends shard copy and move operations to respect colocation settings + + * Adds pg_dist_local_group to prepare for future MX-related changes + + * Adds create_distributed_table to easily create shards and infer colocation + + -- Jason Petersen Tue, 8 Nov 2016 19:45:45 +0000 + +citus (5.2.2.citus-1) stable; urgency=low + + * Adds support for IF NOT EXISTS clause of CREATE INDEX command + + * Adds support for RETURN QUERY and FOR ... 
IN PL/pgSQL features + + * Extends the router planner to handle more queries + + * Changes COUNT of zero-row sets to return 0 rather than an empty result + + * Reduces the minimum permitted task_tracker_delay to a single millisecond + + * Fixes a bug that caused crashes during joins with a WHERE false clause + + * Fixes a bug triggered by unique violation errors raised in long txns + + * Fixes a bug resulting in multiple registration of transaction callbacks + + * Fixes a bug which could result in stale reads of distribution metadata + + * Fixes a bug preventing distributed modifications in some PL/pgSQL functions + + * Fixes some code paths that could hypothetically read uninitialized memory + + * Lowers log level of "waiting for activity" messages + + -- Jason Petersen Tue, 8 Nov 2016 18:43:37 +0000 + +citus (5.2.1.citus-1) stable; urgency=low + + * Fixes subquery pushdown to properly extract outer join qualifiers + + * Addresses possible memory leak during multi-shard transactions + + -- Jason Petersen Tue, 6 Sep 2016 20:47:15 +0000 + +citus (5.2.0.citus-1) stable; urgency=low + + * Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required + + * Adds schema support for tables, named objects (types, operators, etc.) + + * Evaluates non-immutable functions on master in all modification commands + + * Adds support for SERIAL types in non-partition columns + + * Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands + + * Adds support for multi-statement transactions using a fixed set of nodes + + * Full SQL support for SELECT queries which can be executed on single worker + + * Adds option to perform DDL changes using prepared transactions (2PC) + + * Adds an enable_ddl_propagation parameter to control DDL propagation + + * Accelerates shard pruning during merges + + * Adds master_modify_multiple_shards UDF to modify many shards at once + + * Adds COPY support for arrays of user-defined types + + * Now supports parameterized prepared statements for certain use cases + + * Extends LIMIT/OFFSET support to all executor types + + * Constraint violations now fail fast rather than hitting all placements + + * Makes master_create_empty_shard aware of shard placement policy + + * Reduces unnecessary sleep during queries processed by real-time executor + + * Improves task tracker executor's task cleanup logic + + * Relaxes restrictions on cancellation of DDL commands + + * Removes ONLY keyword from worker SELECT queries + + * Error message improvements and standardization + + * Moves master_update_shard_statistics function to pg_catalog schema + + * Fixes a bug where hash-partitioned anti-joins could return bad results + + * Now sets storage type correctly for foreign table-backed shards + + * Fixes master_update_shard_statistics issue with hash-partitioned tables + + * Fixes an issue related to extending table names that require escaping + + * Reduces risk of row counter overflows during modifications + + * Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries + + * Fixes crashes related to partition columns with high attribute numbers + + * Fixes certain subquery and join crashes + + * Detects flex for build even if PostgreSQL was built without it + + * Fixes assert-enabled crash when all_modifications_commutative is true + + -- Jason Petersen Wed, 17 Aug 2016 10:23:21 +0000 + +citus (5.2.0~rc.1-1) testing; urgency=low + + * Release candidate for 5.2. 
+ + -- Jason Petersen Mon, 01 Aug 2016 17:01:05 +0000 + +citus (5.1.1-1) stable; urgency=low + + * Adds complex count distinct expression support in repartitioned subqueries + + * Improves task tracker job cleanup logic, addressing a memory leak + + * Fixes bug that generated incorrect results for LEFT JOIN queries + + * Improves compatibility with Debian's reproducible builds project + + * Fixes build issues on FreeBSD platforms + + -- Jason Petersen Fri, 17 Jun 2016 16:20:15 +0000 + +citus (5.1.0-1) stable; urgency=low + + * Adds distributed COPY to rapidly populate distributed tables + + * Adds support for using EXPLAIN on distributed queries + + * Recognizes and fast-paths single-shard SELECT statements automatically + + * Increases INSERT throughput via shard pruning optimizations + + * Improves planner performance for joins involving tables with many shards + + * Adds ability to pass columns as arguments to function calls in UPDATEs + + * Introduces transaction manager for use by multi-shard commands + + * Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables + + * Adds support for some UNIQUE indexes on hash- or range-partitioned tables + + * Deprecates \stage in favor of using COPY for append-partition tables + + * Deprecates copy_to_distributed_table in favor of first-class COPY support + + * Fixes build problems when using non-packaged PostgreSQL installs + + * Fixes bug that sometimes skipped pruning when partitioned by VARCHAR column + + * Fixes bug impeding use of user functions in repartitioned subqueries + + * Fixes bug involving queries with equality comparisons of boolean types + + * Fixes crash that prevented use alongside pg_stat_statements + + * Fixes crash arising from SELECT queries that lack a target list + + * Improves warning and error messages + + -- Jason Petersen Tue, 17 May 2016 16:55:02 +0000 + +citus (5.1.0~rc.2-1) testing; urgency=low + + * Fix EXPLAIN output when FORMAT JSON in use + + -- Jason Petersen Mon, 16 May 2016 11:08:09 +0000 + +citus (5.1.0~rc.1-1) testing; urgency=low + + * Release candidate for 5.1. + + -- Jason Petersen Wed, 04 May 2016 19:26:23 +0000 + +citus (5.0.1-1) stable; urgency=low + + * Fixes issues on 32-bit systems + + -- Jason Petersen Fri, 15 Apr 2016 19:17:35 +0000 + +citus (5.0.0-1) stable; urgency=low + + * Initial release + + -- Jason Petersen Thu, 24 Mar 2016 10:12:52 -0400 diff --git a/packaging_automation/tests/files/verify/expected_alpine_10.0.3.txt b/packaging_automation/tests/files/verify/expected_alpine_10.0.3.txt index a518032c..866e5456 100644 --- a/packaging_automation/tests/files/verify/expected_alpine_10.0.3.txt +++ b/packaging_automation/tests/files/verify/expected_alpine_10.0.3.txt @@ -1,58 +1,58 @@ -# This file is auto generated from it's template, -# see citusdata/tools/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile. -FROM postgres:13.2-alpine -ARG VERSION=10.0.3 -LABEL maintainer="Citus Data https://citusdata.com" \ - org.label-schema.name="Citus" \ - org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ - org.label-schema.url="https://www.citusdata.com" \ - org.label-schema.vcs-url="https://github.com/citusdata/citus" \ - org.label-schema.vendor="Citus Data, Inc." \ - org.label-schema.version=${VERSION}-alpine \ - org.label-schema.schema-version="1.0" - -# Build citus and delete all used libraries. 
Warning: Libraries installed in this section will be deleted after build completion -RUN apk add --no-cache \ - --virtual builddeps \ - build-base \ - krb5-dev \ - curl \ - curl-dev \ - openssl-dev \ - ca-certificates \ - clang \ - llvm \ - lz4-dev \ - zstd-dev \ - libxslt-dev \ - libxml2-dev \ - icu-dev && \ - apk add --no-cache libcurl && \ - curl -sfLO "https://github.com/citusdata/citus/archive/v${VERSION}.tar.gz" && \ - tar xzf "v${VERSION}.tar.gz" && \ - cd "citus-${VERSION}" && \ - ./configure --with-security-flags && \ - make install && \ - cd .. && \ - rm -rf "citus-${VERSION}" "v${VERSION}.tar.gz" && \ - apk del builddeps - -#--------End of Citus Build - -# add citus to default PostgreSQL config -RUN echo "shared_preload_libraries='citus'" >> /usr/local/share/postgresql/postgresql.conf.sample - -# add scripts to run after initdb -COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ - -# add health check script -COPY pg_healthcheck / - -# entry point unsets PGPASSWORD, but we need it to connect to workers -# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 -RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh - -# Add lz4 dependencies -RUN apk add zstd zstd-dev lz4 lz4-dev - -HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck +# This file is auto generated from it's template, +# see citusdata/tools/packaging_automation/templates/docker/alpine/alpine.tmpl.dockerfile. +FROM postgres:13.2-alpine +ARG VERSION=10.0.3 +LABEL maintainer="Citus Data https://citusdata.com" \ + org.label-schema.name="Citus" \ + org.label-schema.description="Scalable PostgreSQL for multi-tenant and real-time workloads" \ + org.label-schema.url="https://www.citusdata.com" \ + org.label-schema.vcs-url="https://github.com/citusdata/citus" \ + org.label-schema.vendor="Citus Data, Inc." \ + org.label-schema.version=${VERSION}-alpine \ + org.label-schema.schema-version="1.0" + +# Build citus and delete all used libraries. Warning: Libraries installed in this section will be deleted after build completion +RUN apk add --no-cache \ + --virtual builddeps \ + build-base \ + krb5-dev \ + curl \ + curl-dev \ + openssl-dev \ + ca-certificates \ + clang \ + llvm \ + lz4-dev \ + zstd-dev \ + libxslt-dev \ + libxml2-dev \ + icu-dev && \ + apk add --no-cache libcurl && \ + curl -sfLO "https://github.com/citusdata/citus/archive/v${VERSION}.tar.gz" && \ + tar xzf "v${VERSION}.tar.gz" && \ + cd "citus-${VERSION}" && \ + ./configure --with-security-flags && \ + make install && \ + cd .. 
&& \ + rm -rf "citus-${VERSION}" "v${VERSION}.tar.gz" && \ + apk del builddeps + +#--------End of Citus Build + +# add citus to default PostgreSQL config +RUN echo "shared_preload_libraries='citus'" >> /usr/local/share/postgresql/postgresql.conf.sample + +# add scripts to run after initdb +COPY 001-create-citus-extension.sql /docker-entrypoint-initdb.d/ + +# add health check script +COPY pg_healthcheck / + +# entry point unsets PGPASSWORD, but we need it to connect to workers +# https://github.com/docker-library/postgres/blob/33bccfcaddd0679f55ee1028c012d26cd196537d/12/docker-entrypoint.sh#L303 +RUN sed "/unset PGPASSWORD/d" -i /usr/local/bin/docker-entrypoint.sh + +# Add lz4 dependencies +RUN apk add zstd zstd-dev lz4 lz4-dev + +HEALTHCHECK --interval=4s --start-period=6s CMD ./pg_healthcheck diff --git a/packaging_automation/tests/files/verify/expected_debian_latest_v10.2.4.txt b/packaging_automation/tests/files/verify/expected_debian_latest_v10.2.4.txt index 2e5bc7d6..3dce76ec 100644 --- a/packaging_automation/tests/files/verify/expected_debian_latest_v10.2.4.txt +++ b/packaging_automation/tests/files/verify/expected_debian_latest_v10.2.4.txt @@ -1,6 +1,6 @@ -citus (10.2.4.citus-1) stable; urgency=low - - * Official 10.2.4 release of Citus - - -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 - +citus (10.2.4.citus-1) stable; urgency=low + + * Official 10.2.4 release of Citus + + -- Gurkan Indibay Thu, 18 Mar 2021 01:40:08 +0000 + diff --git a/packaging_automation/tests/files/verify/expected_pkgvars_10.2.4.txt b/packaging_automation/tests/files/verify/expected_pkgvars_10.2.4.txt index c7ab0982..effdb08d 100644 --- a/packaging_automation/tests/files/verify/expected_pkgvars_10.2.4.txt +++ b/packaging_automation/tests/files/verify/expected_pkgvars_10.2.4.txt @@ -1,4 +1,4 @@ -pkgname=citus -pkgdesc='Citus (Open-Source)' -pkglatest=10.2.4 -versioning=fancy +pkgname=citus +pkgdesc='Citus (Open-Source)' +pkglatest=10.2.4 +versioning=fancy diff --git a/packaging_automation/tests/files/verify/rpm_latest_changelog_reference.txt b/packaging_automation/tests/files/verify/rpm_latest_changelog_reference.txt index ff0af235..72aef905 100644 --- a/packaging_automation/tests/files/verify/rpm_latest_changelog_reference.txt +++ b/packaging_automation/tests/files/verify/rpm_latest_changelog_reference.txt @@ -1,2 +1,2 @@ -* Tue Feb 01 2022 - Gurkan Indibay 10.2.4.citus-1 +* Tue Feb 01 2022 - Gurkan Indibay 10.2.4.citus-1 - Official 10.2.4 release of Citus \ No newline at end of file diff --git a/packaging_automation/tests/test_citus_package.py b/packaging_automation/tests/test_citus_package.py index 6bb673bc..1f9127c0 100644 --- a/packaging_automation/tests/test_citus_package.py +++ b/packaging_automation/tests/test_citus_package.py @@ -1,233 +1,233 @@ -import os - -import pathlib2 -from dotenv import dotenv_values - -from .test_utils import generate_new_gpg_key -from ..citus_package import ( - POSTGRES_VERSION_FILE, - BuildType, - InputOutputParameters, - SigningCredentials, - build_packages, - decode_os_and_release, - get_build_platform, - get_release_package_folder_name, - get_postgres_versions, -) -from ..common_tool_methods import ( - define_rpm_public_key_to_machine, - delete_all_gpg_keys_by_name, - delete_rpm_key_by_name, - get_gpg_fingerprints_by_name, - get_private_key_by_fingerprint_with_passphrase, - run, - transform_key_into_base64_str, - verify_rpm_signature_in_dir, -) -from ..upload_to_package_cloud import ( - delete_package_from_package_cloud, - package_exists, - 
upload_files_in_directory_to_package_cloud, -) - -TEST_BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) - -PACKAGING_SOURCE_FOLDER = "packaging_test" -PACKAGING_EXEC_FOLDER = f"{TEST_BASE_PATH}/{PACKAGING_SOURCE_FOLDER}" -BASE_OUTPUT_FOLDER = f"{PACKAGING_EXEC_FOLDER}/packages" - -single_postgres_package_counts = { - "el/7": 2, - "el/8": 3, - "ol/7": 2, - "ol/8": 3, - "almalinux/9": 3, - "almalinux/8": 3, - "rockylinux/9": 3, - "el/9": 3, - "ol/9": 3, - "debian/stretch": 2, - "debian/buster": 2, - "debian/bullseye": 2, - "debian/bookworm": 2, - "ubuntu/bionic": 2, - "ubuntu/focal": 2, - "ubuntu/jammy": 2, - "ubuntu/kinetic": 2, -} - -TEST_GPG_KEY_NAME = "Citus Data " -TEST_GPG_KEY_PASSPHRASE = os.getenv("PACKAGING_PASSPHRASE") -GH_TOKEN = os.getenv("GH_TOKEN") -PACKAGE_CLOUD_API_TOKEN = os.getenv("PACKAGE_CLOUD_API_TOKEN") -REPO_CLIENT_SECRET = os.getenv("REPO_CLIENT_SECRET") -PLATFORM = get_build_platform( - os.getenv("PLATFORM"), os.getenv("PACKAGING_IMAGE_PLATFORM") -) -PACKAGING_BRANCH_NAME = os.getenv("PACKAGING_BRANCH_NAME", "all-citus-unit-tests") - - -def get_required_package_count(input_files_dir: str, platform: str): - release_versions, _ = get_postgres_versions( - platform=platform, input_files_dir=input_files_dir - ) - print( - f"get_required_package_count: Release versions:{release_versions}:{single_postgres_package_counts[platform]}" - ) - return len(release_versions) * single_postgres_package_counts[platform] - - -def setup_module(): - # Run tests against "all-citus-unit-tests" since we don't want to deal with the changes - # made to "all-citus" in each release. - packaging_branch_name = ( - "pgxn-citus" if PLATFORM == "pgxn" else PACKAGING_BRANCH_NAME - ) - if not os.path.exists(PACKAGING_EXEC_FOLDER): - run( - f"git clone --branch {packaging_branch_name} https://github.com/citusdata/packaging.git" - f" {PACKAGING_EXEC_FOLDER}" - ) - - -def teardown_module(): - if os.path.exists("packaging_test"): - run("rm -r packaging_test") - - -def test_build_packages(): - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - delete_rpm_key_by_name(TEST_GPG_KEY_NAME) - generate_new_gpg_key( - f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" - ) - gpg_fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) - assert len(gpg_fingerprints) > 0 - secret_key = transform_key_into_base64_str( - get_private_key_by_fingerprint_with_passphrase( - gpg_fingerprints[0], TEST_GPG_KEY_PASSPHRASE - ) - ) - define_rpm_public_key_to_machine(gpg_fingerprints[0]) - signing_credentials = SigningCredentials(secret_key, TEST_GPG_KEY_PASSPHRASE) - input_output_parameters = InputOutputParameters.build( - PACKAGING_EXEC_FOLDER, BASE_OUTPUT_FOLDER, output_validation=False - ) - - build_packages( - GH_TOKEN, - PLATFORM, - BuildType.release, - signing_credentials, - input_output_parameters, - is_test=True, - ) - verify_rpm_signature_in_dir(BASE_OUTPUT_FOLDER) - os_name, os_version = decode_os_and_release(PLATFORM) - sub_folder = get_release_package_folder_name(os_name, os_version) - release_output_folder = f"{BASE_OUTPUT_FOLDER}/{sub_folder}" - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - - postgres_version_file_path = f"{PACKAGING_EXEC_FOLDER}/{POSTGRES_VERSION_FILE}" - if PLATFORM != "pgxn": - assert len(os.listdir(release_output_folder)) == get_required_package_count( - input_files_dir=PACKAGING_EXEC_FOLDER, platform=PLATFORM - ) - assert os.path.exists(postgres_version_file_path) - config = dotenv_values(postgres_version_file_path) - 
assert config["release_versions"] == "12,13,14" - assert config["nightly_versions"] == "14,15" - - -def test_get_required_package_count(): - assert ( - get_required_package_count( - input_files_dir=PACKAGING_EXEC_FOLDER, platform="el/8" - ) - == 9 - ) - - -def test_decode_os_packages(): - os, release = decode_os_and_release("el/7") - assert os == "el" and release == "7" - - -def test_get_postgres_versions_ol_7(): - release_versions, nightly_versions = get_postgres_versions( - input_files_dir=f"{os.getcwd()}/packaging_automation/tests/files/get_postgres_versions_tests", - platform="ol/7", - ) - # pg 15 is excluded for all releases with pg_exclude file - assert len(release_versions) == 2 and release_versions == ["13", "14"] - assert len(nightly_versions) == 2 and nightly_versions == ["13", "14"] - - -def test_get_postgres_versions_el_7(): - release_versions, nightly_versions = get_postgres_versions( - input_files_dir=f"{os.getcwd()}/packaging_automation/tests/files/get_postgres_versions_tests", - platform="el/7", - ) - # pg 15 is excluded for all releases with pg_exclude file - assert len(release_versions) == 2 and release_versions == ["13", "14"] - assert len(nightly_versions) == 2 and nightly_versions == ["14", "15"] - - -def test_get_postgres_versions_debain_bullseye(): - release_versions, nightly_versions = get_postgres_versions( - input_files_dir=f"{os.getcwd()}/packaging_automation/tests/files/get_postgres_versions_tests", - platform="debian/bullseye", - ) - # pg 15 is excluded for all releases with pg_exclude file - assert len(release_versions) == 2 and release_versions == ["13", "14"] - assert len(nightly_versions) == 2 and nightly_versions == ["14", "15"] - - -def test_upload_to_package_cloud(): - platform = get_build_platform( - os.getenv("PLATFORM"), os.getenv("PACKAGING_IMAGE_PLATFORM") - ) - current_branch = "all-citus" - main_branch = "all-citus" - output = upload_files_in_directory_to_package_cloud( - BASE_OUTPUT_FOLDER, - platform, - PACKAGE_CLOUD_API_TOKEN, - "citus-bot/sample", - current_branch, - main_branch, - ) - distro_parts = platform.split("/") - if len(distro_parts) != 2: - raise ValueError( - "Platform should consist of two parts splitted with '/' e.g. el/8" - ) - for return_value in output.return_values: - exists = package_exists( - PACKAGE_CLOUD_API_TOKEN, - "citus-bot", - "sample", - os.path.basename(return_value.file_name), - platform, - ) - if not exists: - raise ValueError( - f"{os.path.basename(return_value.file_name)} could not be found on package cloud" - ) - - for return_value in output.return_values: - delete_output = delete_package_from_package_cloud( - PACKAGE_CLOUD_API_TOKEN, - "citus-bot", - "sample", - distro_parts[0], - distro_parts[1], - os.path.basename(return_value.file_name), - ) - if delete_output.success_status: - print(f"{os.path.basename(return_value.file_name)} deleted successfully") - else: - print( - f"{os.path.basename(return_value.file_name)} can not be deleted. 
Message: {delete_output.message}" - ) +import os + +import pathlib2 +from dotenv import dotenv_values + +from .test_utils import generate_new_gpg_key +from ..citus_package import ( + POSTGRES_VERSION_FILE, + BuildType, + InputOutputParameters, + SigningCredentials, + build_packages, + decode_os_and_release, + get_build_platform, + get_release_package_folder_name, + get_postgres_versions, +) +from ..common_tool_methods import ( + define_rpm_public_key_to_machine, + delete_all_gpg_keys_by_name, + delete_rpm_key_by_name, + get_gpg_fingerprints_by_name, + get_private_key_by_fingerprint_with_passphrase, + run, + transform_key_into_base64_str, + verify_rpm_signature_in_dir, +) +from ..upload_to_package_cloud import ( + delete_package_from_package_cloud, + package_exists, + upload_files_in_directory_to_package_cloud, +) + +TEST_BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) + +PACKAGING_SOURCE_FOLDER = "packaging_test" +PACKAGING_EXEC_FOLDER = f"{TEST_BASE_PATH}/{PACKAGING_SOURCE_FOLDER}" +BASE_OUTPUT_FOLDER = f"{PACKAGING_EXEC_FOLDER}/packages" + +single_postgres_package_counts = { + "el/7": 2, + "el/8": 3, + "ol/7": 2, + "ol/8": 3, + "almalinux/9": 3, + "almalinux/8": 3, + "rockylinux/9": 3, + "el/9": 3, + "ol/9": 3, + "debian/stretch": 2, + "debian/buster": 2, + "debian/bullseye": 2, + "debian/bookworm": 2, + "ubuntu/bionic": 2, + "ubuntu/focal": 2, + "ubuntu/jammy": 2, + "ubuntu/kinetic": 2, +} + +TEST_GPG_KEY_NAME = "Citus Data " +TEST_GPG_KEY_PASSPHRASE = os.getenv("PACKAGING_PASSPHRASE") +GH_TOKEN = os.getenv("GH_TOKEN") +PACKAGE_CLOUD_API_TOKEN = os.getenv("PACKAGE_CLOUD_API_TOKEN") +REPO_CLIENT_SECRET = os.getenv("REPO_CLIENT_SECRET") +PLATFORM = get_build_platform( + os.getenv("PLATFORM"), os.getenv("PACKAGING_IMAGE_PLATFORM") +) +PACKAGING_BRANCH_NAME = os.getenv("PACKAGING_BRANCH_NAME", "all-citus-unit-tests") + + +def get_required_package_count(input_files_dir: str, platform: str): + release_versions, _ = get_postgres_versions( + platform=platform, input_files_dir=input_files_dir + ) + print( + f"get_required_package_count: Release versions:{release_versions}:{single_postgres_package_counts[platform]}" + ) + return len(release_versions) * single_postgres_package_counts[platform] + + +def setup_module(): + # Run tests against "all-citus-unit-tests" since we don't want to deal with the changes + # made to "all-citus" in each release. 
+ packaging_branch_name = ( + "pgxn-citus" if PLATFORM == "pgxn" else PACKAGING_BRANCH_NAME + ) + if not os.path.exists(PACKAGING_EXEC_FOLDER): + run( + f"git clone --branch {packaging_branch_name} https://github.com/citusdata/packaging.git" + f" {PACKAGING_EXEC_FOLDER}" + ) + + +def teardown_module(): + if os.path.exists("packaging_test"): + run("rm -r packaging_test") + + +def test_build_packages(): + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + delete_rpm_key_by_name(TEST_GPG_KEY_NAME) + generate_new_gpg_key( + f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" + ) + gpg_fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) + assert len(gpg_fingerprints) > 0 + secret_key = transform_key_into_base64_str( + get_private_key_by_fingerprint_with_passphrase( + gpg_fingerprints[0], TEST_GPG_KEY_PASSPHRASE + ) + ) + define_rpm_public_key_to_machine(gpg_fingerprints[0]) + signing_credentials = SigningCredentials(secret_key, TEST_GPG_KEY_PASSPHRASE) + input_output_parameters = InputOutputParameters.build( + PACKAGING_EXEC_FOLDER, BASE_OUTPUT_FOLDER, output_validation=False + ) + + build_packages( + GH_TOKEN, + PLATFORM, + BuildType.release, + signing_credentials, + input_output_parameters, + is_test=True, + ) + verify_rpm_signature_in_dir(BASE_OUTPUT_FOLDER) + os_name, os_version = decode_os_and_release(PLATFORM) + sub_folder = get_release_package_folder_name(os_name, os_version) + release_output_folder = f"{BASE_OUTPUT_FOLDER}/{sub_folder}" + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + + postgres_version_file_path = f"{PACKAGING_EXEC_FOLDER}/{POSTGRES_VERSION_FILE}" + if PLATFORM != "pgxn": + assert len(os.listdir(release_output_folder)) == get_required_package_count( + input_files_dir=PACKAGING_EXEC_FOLDER, platform=PLATFORM + ) + assert os.path.exists(postgres_version_file_path) + config = dotenv_values(postgres_version_file_path) + assert config["release_versions"] == "12,13,14" + assert config["nightly_versions"] == "14,15" + + +def test_get_required_package_count(): + assert ( + get_required_package_count( + input_files_dir=PACKAGING_EXEC_FOLDER, platform="el/8" + ) + == 9 + ) + + +def test_decode_os_packages(): + os, release = decode_os_and_release("el/7") + assert os == "el" and release == "7" + + +def test_get_postgres_versions_ol_7(): + release_versions, nightly_versions = get_postgres_versions( + input_files_dir=f"{os.getcwd()}/packaging_automation/tests/files/get_postgres_versions_tests", + platform="ol/7", + ) + # pg 15 is excluded for all releases with pg_exclude file + assert len(release_versions) == 2 and release_versions == ["13", "14"] + assert len(nightly_versions) == 2 and nightly_versions == ["13", "14"] + + +def test_get_postgres_versions_el_7(): + release_versions, nightly_versions = get_postgres_versions( + input_files_dir=f"{os.getcwd()}/packaging_automation/tests/files/get_postgres_versions_tests", + platform="el/7", + ) + # pg 15 is excluded for all releases with pg_exclude file + assert len(release_versions) == 2 and release_versions == ["13", "14"] + assert len(nightly_versions) == 2 and nightly_versions == ["14", "15"] + + +def test_get_postgres_versions_debain_bullseye(): + release_versions, nightly_versions = get_postgres_versions( + input_files_dir=f"{os.getcwd()}/packaging_automation/tests/files/get_postgres_versions_tests", + platform="debian/bullseye", + ) + # pg 15 is excluded for all releases with pg_exclude file + assert len(release_versions) == 2 and release_versions == ["13", "14"] + assert 
len(nightly_versions) == 2 and nightly_versions == ["14", "15"] + + +def test_upload_to_package_cloud(): + platform = get_build_platform( + os.getenv("PLATFORM"), os.getenv("PACKAGING_IMAGE_PLATFORM") + ) + current_branch = "all-citus" + main_branch = "all-citus" + output = upload_files_in_directory_to_package_cloud( + BASE_OUTPUT_FOLDER, + platform, + PACKAGE_CLOUD_API_TOKEN, + "citus-bot/sample", + current_branch, + main_branch, + ) + distro_parts = platform.split("/") + if len(distro_parts) != 2: + raise ValueError( + "Platform should consist of two parts splitted with '/' e.g. el/8" + ) + for return_value in output.return_values: + exists = package_exists( + PACKAGE_CLOUD_API_TOKEN, + "citus-bot", + "sample", + os.path.basename(return_value.file_name), + platform, + ) + if not exists: + raise ValueError( + f"{os.path.basename(return_value.file_name)} could not be found on package cloud" + ) + + for return_value in output.return_values: + delete_output = delete_package_from_package_cloud( + PACKAGE_CLOUD_API_TOKEN, + "citus-bot", + "sample", + distro_parts[0], + distro_parts[1], + os.path.basename(return_value.file_name), + ) + if delete_output.success_status: + print(f"{os.path.basename(return_value.file_name)} deleted successfully") + else: + print( + f"{os.path.basename(return_value.file_name)} can not be deleted. Message: {delete_output.message}" + ) diff --git a/packaging_automation/tests/test_citus_package_utils.py b/packaging_automation/tests/test_citus_package_utils.py index 37f0118a..e3f1e449 100644 --- a/packaging_automation/tests/test_citus_package_utils.py +++ b/packaging_automation/tests/test_citus_package_utils.py @@ -1,215 +1,215 @@ -import os -import subprocess - -import pathlib2 -import pytest - -from .test_utils import generate_new_gpg_key -from ..citus_package import ( - decode_os_and_release, - is_docker_running, - get_signing_credentials, - get_postgres_versions, - build_package, - BuildType, - sign_packages, - SigningCredentials, - InputOutputParameters, - get_package_version_without_release_stage_from_pkgvars, - write_postgres_versions_into_file, -) -from ..common_tool_methods import ( - delete_all_gpg_keys_by_name, - get_gpg_fingerprints_by_name, - run, - get_private_key_by_fingerprint_without_passphrase, - define_rpm_public_key_to_machine, - delete_rpm_key_by_name, - get_private_key_by_fingerprint_with_passphrase, - verify_rpm_signature_in_dir, - transform_key_into_base64_str, -) - -TEST_BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) -TEST_GPG_KEY_NAME = "Citus Data " -TEST_GPG_KEY_PASSPHRASE = "Citus123" -GH_TOKEN = os.getenv("GH_TOKEN") - -PACKAGING_SOURCE_FOLDER = "packaging_test" -PACKAGING_EXEC_FOLDER = f"{TEST_BASE_PATH}/{PACKAGING_SOURCE_FOLDER}" -OUTPUT_FOLDER = f"{PACKAGING_EXEC_FOLDER}/packages" -INPUT_OUTPUT_PARAMETERS = InputOutputParameters.build( - PACKAGING_EXEC_FOLDER, OUTPUT_FOLDER, output_validation=False -) - - -def setup_module(): - if not os.path.exists("packaging_test"): - run( - f"git clone --branch all-citus-unit-tests https://github.com/citusdata/packaging.git {PACKAGING_SOURCE_FOLDER}" - ) - - -def teardown_module(): - if os.path.exists("packaging_test"): - run("rm -r packaging_test") - - -def test_decode_os_and_release(): - os_name, os_version = decode_os_and_release("el/7") - assert os_name == "el" and os_version == "7" - - os_name, os_version = decode_os_and_release("debian/buster") - assert os_name == "debian" and os_version == "buster" - - os_name, os_version = decode_os_and_release("pgxn") - assert 
os_name == "pgxn" and os_version == "" - - with pytest.raises(ValueError): - decode_os_and_release("debian") - - with pytest.raises(ValueError): - decode_os_and_release("debian/anders") - - -def test_is_docker_running(): - assert is_docker_running() - - -def test_get_signing_credentials(): - signing_credentials = get_signing_credentials("verysecretkey", "123") - assert ( - signing_credentials.secret_key == "verysecretkey" - and signing_credentials.passphrase == "123" - ) - - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - - generate_new_gpg_key( - f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging.gpg" - ) - os.environ["PACKAGING_PASSPHRASE"] = TEST_GPG_KEY_PASSPHRASE - signing_credentials = get_signing_credentials("", TEST_GPG_KEY_PASSPHRASE) - fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) - assert len(fingerprints) > 0 - expected_gpg_key = get_private_key_by_fingerprint_without_passphrase( - fingerprints[0] - ) - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - assert ( - signing_credentials.secret_key - == transform_key_into_base64_str(expected_gpg_key) - and signing_credentials.passphrase == TEST_GPG_KEY_PASSPHRASE - ) - - -def test_delete_rpm_key_by_name(): - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - generate_new_gpg_key( - f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" - ) - fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) - assert len(fingerprints) > 0 - define_rpm_public_key_to_machine(fingerprints[0]) - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - - # return code is checked so check is not required - # pylint: disable=subprocess-run-check - output = subprocess.run( - ["rpm", "-q gpg-pubkey", "--qf %{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n"], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - - assert ( - TEST_GPG_KEY_NAME not in output.stdout.decode("ascii") - and output.returncode == 1 - ) - - -def test_get_postgres_versions(): - release_versions, nightly_versions = get_postgres_versions( - platform="el/8", - input_files_dir=f"{TEST_BASE_PATH}/packaging_automation/tests/files", - ) - assert release_versions == ["11", "12", "13"] and nightly_versions == [ - "12", - "13", - "14", - ] - - -def test_build_package_debian(): - input_output_parameters = InputOutputParameters.build( - PACKAGING_EXEC_FOLDER, - f"{OUTPUT_FOLDER}/debian-stretch", - output_validation=False, - ) - - package_version = get_package_version_without_release_stage_from_pkgvars( - input_output_parameters.input_files_dir - ) - write_postgres_versions_into_file( - input_output_parameters.input_files_dir, package_version - ) - - build_package( - github_token=GH_TOKEN, - build_type=BuildType.release, - docker_platform="debian-stretch", - postgres_version="all", - input_output_parameters=input_output_parameters, - is_test=True, - ) - - -def test_build_package_rpm(): - input_output_parameters = InputOutputParameters.build( - PACKAGING_EXEC_FOLDER, - f"{OUTPUT_FOLDER}/debian-stretch", - output_validation=False, - ) - - build_package( - github_token=GH_TOKEN, - build_type=BuildType.release, - docker_platform="almalinux-8", - postgres_version="13", - input_output_parameters=input_output_parameters, - is_test=True, - ) - - -def test_sign_packages(): - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - delete_rpm_key_by_name(TEST_GPG_KEY_NAME) - generate_new_gpg_key( - f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" - ) - gpg_fingerprints = 
get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) - assert len(gpg_fingerprints) > 0 - private_key = get_private_key_by_fingerprint_with_passphrase( - gpg_fingerprints[0], TEST_GPG_KEY_PASSPHRASE - ) - secret_key = transform_key_into_base64_str(private_key) - define_rpm_public_key_to_machine(gpg_fingerprints[0]) - signing_credentials = SigningCredentials( - secret_key=secret_key, passphrase=TEST_GPG_KEY_PASSPHRASE - ) - input_output_parameters = InputOutputParameters.build( - PACKAGING_EXEC_FOLDER, f"{OUTPUT_FOLDER}", output_validation=False - ) - sign_packages( - sub_folder="centos-8", - signing_credentials=signing_credentials, - input_output_parameters=input_output_parameters, - ) - sign_packages( - sub_folder="debian-stretch", - signing_credentials=signing_credentials, - input_output_parameters=input_output_parameters, - ) - verify_rpm_signature_in_dir(OUTPUT_FOLDER) - - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - run(f"rm -r {OUTPUT_FOLDER}") +import os +import subprocess + +import pathlib2 +import pytest + +from .test_utils import generate_new_gpg_key +from ..citus_package import ( + decode_os_and_release, + is_docker_running, + get_signing_credentials, + get_postgres_versions, + build_package, + BuildType, + sign_packages, + SigningCredentials, + InputOutputParameters, + get_package_version_without_release_stage_from_pkgvars, + write_postgres_versions_into_file, +) +from ..common_tool_methods import ( + delete_all_gpg_keys_by_name, + get_gpg_fingerprints_by_name, + run, + get_private_key_by_fingerprint_without_passphrase, + define_rpm_public_key_to_machine, + delete_rpm_key_by_name, + get_private_key_by_fingerprint_with_passphrase, + verify_rpm_signature_in_dir, + transform_key_into_base64_str, +) + +TEST_BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) +TEST_GPG_KEY_NAME = "Citus Data " +TEST_GPG_KEY_PASSPHRASE = "Citus123" +GH_TOKEN = os.getenv("GH_TOKEN") + +PACKAGING_SOURCE_FOLDER = "packaging_test" +PACKAGING_EXEC_FOLDER = f"{TEST_BASE_PATH}/{PACKAGING_SOURCE_FOLDER}" +OUTPUT_FOLDER = f"{PACKAGING_EXEC_FOLDER}/packages" +INPUT_OUTPUT_PARAMETERS = InputOutputParameters.build( + PACKAGING_EXEC_FOLDER, OUTPUT_FOLDER, output_validation=False +) + + +def setup_module(): + if not os.path.exists("packaging_test"): + run( + f"git clone --branch all-citus-unit-tests https://github.com/citusdata/packaging.git {PACKAGING_SOURCE_FOLDER}" + ) + + +def teardown_module(): + if os.path.exists("packaging_test"): + run("rm -r packaging_test") + + +def test_decode_os_and_release(): + os_name, os_version = decode_os_and_release("el/7") + assert os_name == "el" and os_version == "7" + + os_name, os_version = decode_os_and_release("debian/buster") + assert os_name == "debian" and os_version == "buster" + + os_name, os_version = decode_os_and_release("pgxn") + assert os_name == "pgxn" and os_version == "" + + with pytest.raises(ValueError): + decode_os_and_release("debian") + + with pytest.raises(ValueError): + decode_os_and_release("debian/anders") + + +def test_is_docker_running(): + assert is_docker_running() + + +def test_get_signing_credentials(): + signing_credentials = get_signing_credentials("verysecretkey", "123") + assert ( + signing_credentials.secret_key == "verysecretkey" + and signing_credentials.passphrase == "123" + ) + + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + + generate_new_gpg_key( + f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging.gpg" + ) + os.environ["PACKAGING_PASSPHRASE"] = TEST_GPG_KEY_PASSPHRASE + signing_credentials 
= get_signing_credentials("", TEST_GPG_KEY_PASSPHRASE) + fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) + assert len(fingerprints) > 0 + expected_gpg_key = get_private_key_by_fingerprint_without_passphrase( + fingerprints[0] + ) + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + assert ( + signing_credentials.secret_key + == transform_key_into_base64_str(expected_gpg_key) + and signing_credentials.passphrase == TEST_GPG_KEY_PASSPHRASE + ) + + +def test_delete_rpm_key_by_name(): + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + generate_new_gpg_key( + f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" + ) + fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) + assert len(fingerprints) > 0 + define_rpm_public_key_to_machine(fingerprints[0]) + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + + # return code is checked so check is not required + # pylint: disable=subprocess-run-check + output = subprocess.run( + ["rpm", "-q gpg-pubkey", "--qf %{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n"], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + + assert ( + TEST_GPG_KEY_NAME not in output.stdout.decode("ascii") + and output.returncode == 1 + ) + + +def test_get_postgres_versions(): + release_versions, nightly_versions = get_postgres_versions( + platform="el/8", + input_files_dir=f"{TEST_BASE_PATH}/packaging_automation/tests/files", + ) + assert release_versions == ["11", "12", "13"] and nightly_versions == [ + "12", + "13", + "14", + ] + + +def test_build_package_debian(): + input_output_parameters = InputOutputParameters.build( + PACKAGING_EXEC_FOLDER, + f"{OUTPUT_FOLDER}/debian-stretch", + output_validation=False, + ) + + package_version = get_package_version_without_release_stage_from_pkgvars( + input_output_parameters.input_files_dir + ) + write_postgres_versions_into_file( + input_output_parameters.input_files_dir, package_version + ) + + build_package( + github_token=GH_TOKEN, + build_type=BuildType.release, + docker_platform="debian-stretch", + postgres_version="all", + input_output_parameters=input_output_parameters, + is_test=True, + ) + + +def test_build_package_rpm(): + input_output_parameters = InputOutputParameters.build( + PACKAGING_EXEC_FOLDER, + f"{OUTPUT_FOLDER}/debian-stretch", + output_validation=False, + ) + + build_package( + github_token=GH_TOKEN, + build_type=BuildType.release, + docker_platform="almalinux-8", + postgres_version="13", + input_output_parameters=input_output_parameters, + is_test=True, + ) + + +def test_sign_packages(): + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + delete_rpm_key_by_name(TEST_GPG_KEY_NAME) + generate_new_gpg_key( + f"{TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" + ) + gpg_fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) + assert len(gpg_fingerprints) > 0 + private_key = get_private_key_by_fingerprint_with_passphrase( + gpg_fingerprints[0], TEST_GPG_KEY_PASSPHRASE + ) + secret_key = transform_key_into_base64_str(private_key) + define_rpm_public_key_to_machine(gpg_fingerprints[0]) + signing_credentials = SigningCredentials( + secret_key=secret_key, passphrase=TEST_GPG_KEY_PASSPHRASE + ) + input_output_parameters = InputOutputParameters.build( + PACKAGING_EXEC_FOLDER, f"{OUTPUT_FOLDER}", output_validation=False + ) + sign_packages( + sub_folder="centos-8", + signing_credentials=signing_credentials, + input_output_parameters=input_output_parameters, + ) + sign_packages( + sub_folder="debian-stretch", + 
signing_credentials=signing_credentials, + input_output_parameters=input_output_parameters, + ) + verify_rpm_signature_in_dir(OUTPUT_FOLDER) + + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + run(f"rm -r {OUTPUT_FOLDER}") diff --git a/packaging_automation/tests/test_common_tool_methods.py b/packaging_automation/tests/test_common_tool_methods.py index cf3fe89f..8e1bed07 100644 --- a/packaging_automation/tests/test_common_tool_methods.py +++ b/packaging_automation/tests/test_common_tool_methods.py @@ -1,359 +1,359 @@ -import os -import uuid -from datetime import datetime -from shutil import copyfile - -import pathlib2 -from github import Github - -from .test_utils import generate_new_gpg_key -from ..common_tool_methods import ( - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, - append_line_in_file, - define_rpm_public_key_to_machine, - delete_all_gpg_keys_by_name, - delete_rpm_key_by_name, - filter_prs_by_label, - find_nth_matching_line_and_line_number, - find_nth_occurrence_position, - get_current_branch, - get_gpg_fingerprints_by_name, - get_last_commit_message, - get_minor_version, - get_patch_version_regex, - get_project_version_from_tag_name, - get_prs_for_patch_release, - get_supported_postgres_release_versions, - get_supported_postgres_nightly_versions, - get_upcoming_minor_version, - get_version_details, - is_major_release, - is_tag_on_branch, - local_branch_exists, - prepend_line_in_file, - process_template_file, - remote_branch_exists, - remove_prefix, - remove_text_with_parenthesis, - replace_line_in_file, - rpm_key_matches_summary, - run, - run_with_output, - str_array_to_str, -) - -GITHUB_TOKEN = os.getenv("GH_TOKEN") -BASE_PATH = pathlib2.Path(__file__).parents[1] -TEST_BASE_PATH = pathlib2.Path(__file__).parent.absolute() -TEST_GPG_KEY_NAME = "Citus Data " - - -def test_find_nth_occurrence_position(): - assert find_nth_occurrence_position("foofoo foofoo", "foofoo", 2) == 7 - - -def test_find_nth_matching_line_number_by_regex(): - assert ( - find_nth_matching_line_and_line_number( - "citusx\n citusx\ncitusx", "^citusx$", 2 - )[0] - == 2 - ) - assert ( - find_nth_matching_line_and_line_number( - "citusx\n citusx\ncitusx", "^citusy$", 2 - )[0] - == -1 - ) - - -def test_is_major_release(): - assert is_major_release("10.0.0") - assert not is_major_release("10.0.1") - - -def test_get_project_version_from_tag_name(): - tag_name = "v10.0.3" - assert get_project_version_from_tag_name(tag_name) == "10.0.3" - - -def test_str_array_to_str(): - assert str_array_to_str(["1", "2", "3", "4"]) == "1\n2\n3\n4\n" - - -def test_run(): - result = run("echo 'Run' method is performing fine ") - assert result.returncode == 0 - - -def test_remove_paranthesis_from_string(): - assert ( - remove_text_with_parenthesis("out of paranthesis (inside paranthesis)") - == "out of paranthesis " - ) - - -def test_get_version_details(): - assert get_version_details("10.0.1") == { - "major": "10", - "minor": "0", - "patch": "1", - "stage": "stable", - } - - -def test_is_tag_on_branch(): - current_branch = get_current_branch(os.getcwd()) - run("git checkout develop") - assert is_tag_on_branch("v0.8.3", "develop") - assert not is_tag_on_branch("v1.8.3", "develop") - run(f"git checkout {current_branch}") - - -def test_replace_line_in_file(): - replace_str = "Summary: Replace Test" - copy_file_path = f"{TEST_BASE_PATH}/files/citus_copy.spec" - copyfile(f"{TEST_BASE_PATH}/files/citus.spec", copy_file_path) - replace_line_in_file(copy_file_path, r"^Summary: *", replace_str) - try: - with open( - 
copy_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[6] == replace_str - finally: - os.remove(copy_file_path) - - -def test_get_upcoming_minor_version(): - assert get_upcoming_minor_version("10.1.0") == "10.2" - - -def test_get_last_commit_message(): - current_branch_name = get_current_branch(os.getcwd()) - test_branch_name = f"test{uuid.uuid4()}" - run(f"git checkout -b {test_branch_name}") - try: - with open( - test_branch_name, - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write("Test content") - run("git add .") - commit_message = f"Test message for {test_branch_name}" - run(f"git commit -m '{commit_message}'") - assert get_last_commit_message(os.getcwd()) == f"{commit_message}\n" - finally: - run(f"git checkout {current_branch_name}") - run(f"git branch -D {test_branch_name}") - - -def test_local_branch_exist(): - run("git fetch") - current_branch_name = get_current_branch(os.getcwd()) - branch_name = "develop-local-test" - assert remote_branch_exists("develop", os.getcwd()) - assert not remote_branch_exists("develop2", os.getcwd()) - try: - run(f"git checkout -b {branch_name}") - assert local_branch_exists(branch_name, os.getcwd()) - run(f"git checkout {current_branch_name} ") - finally: - run(f"git branch -D {branch_name}") - - assert not remote_branch_exists("develop_test", os.getcwd()) - - -def test_remote_branch_exist(): - run("git fetch") - assert remote_branch_exists("develop", os.getcwd()) - assert not remote_branch_exists(f"develop{uuid.uuid4()}", os.getcwd()) - - -def test_get_minor_version(): - assert get_minor_version("10.0.3") == "10.0" - - -def test_get_patch_version_regex(): - assert get_patch_version_regex("10.0.3") == r"^10\.0\.\d{1,3}$" - - -def test_append_line_in_file(): - test_file = "test_append.txt" - try: - with open( - test_file, - "a", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write("Test line 1\n") - writer.write("Test line 2\n") - writer.write("Test line 3\n") - writer.write("Test line 4\n") - writer.write("Test line 5\n") - writer.write("Test line 6\n") - writer.write("Test line 7\n") - writer.write("Test line 8\n") - append_line_in_file(test_file, "^Test line 1", "Test line 1.5") - append_line_in_file(test_file, "^Test line 2", "Test line 2.5") - append_line_in_file(test_file, "^Test line 5", "Test line 5.5") - - with open( - test_file, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.readlines() - assert len(lines) == 11 - assert lines[0] == "Test line 1\n" - assert lines[1] == "Test line 1.5\n" - assert lines[2] == "Test line 2\n" - assert lines[3] == "Test line 2.5\n" - finally: - os.remove(test_file) - - -def test_prepend_line_in_file(): - test_file = "test_prepend.txt" - try: - with open( - test_file, - "a", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write("Test line 1\n") - writer.write("Test line 2\n") - writer.write("Test line 3\n") - writer.write("Test line 4\n") - writer.write("Test line 5\n") - writer.write("Test line 6\n") - writer.write("Test line 7\n") - writer.write("Test line 8\n") - prepend_line_in_file(test_file, "^Test line 1", "Test line 0.5") - prepend_line_in_file(test_file, "^Test line 2", "Test line 1.5") - 
prepend_line_in_file(test_file, "^Test line 5", "Test line 4.5") - - with open( - test_file, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.readlines() - assert len(lines) == 11 - assert lines[0] == "Test line 0.5\n" - assert lines[1] == "Test line 1\n" - assert lines[2] == "Test line 1.5\n" - assert lines[3] == "Test line 2\n" - finally: - os.remove(test_file) - - -def test_getprs(): - # created at is not seen on Github. Should be checked on API result - g = Github(GITHUB_TOKEN) - repository = g.get_repo("citusdata/citus") - prs = get_prs_for_patch_release( - repository, - datetime.strptime("2021.02.26", "%Y.%m.%d"), - "master", - datetime.strptime("2021.03.02", "%Y.%m.%d"), - ) - assert len(prs) == 6 - assert prs[0].number == 4748 - - -def test_getprs_with_backlog_label(): - g = Github(GITHUB_TOKEN) - repository = g.get_repo("citusdata/citus") - prs = get_prs_for_patch_release( - repository, - datetime.strptime("2021.02.20", "%Y.%m.%d"), - "master", - datetime.strptime("2021.02.27", "%Y.%m.%d"), - ) - prs_backlog = filter_prs_by_label(prs, "backport") - assert len(prs_backlog) == 1 - assert prs_backlog[0].number == 4746 - - -def test_process_template_file(): - content = process_template_file( - "10.0.3", - f"{BASE_PATH}/templates", - "docker/alpine/alpine.tmpl.dockerfile", - "13.2", - ) - with open( - f"{TEST_BASE_PATH}/files/verify/expected_alpine_10.0.3.txt", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - expected_content = reader.read() - assert expected_content == content - - -def test_remove_prefix(): - assert remove_prefix("test_prefix", "test_") == "prefix" - assert remove_prefix("test_prefix", "part") == "test_prefix" - - -def test_delete_rpm_key_by_name(): - delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) - generate_new_gpg_key(f"{TEST_BASE_PATH}/files/gpg/packaging_with_passphrase.gpg") - fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) - assert len(fingerprints) > 0 - define_rpm_public_key_to_machine(fingerprints[0]) - delete_rpm_key_by_name(TEST_GPG_KEY_NAME) - result = run_with_output("rpm -q gpg-pubkey") - output = result.stdout.decode("ascii") - key_lines = output.splitlines() - for key_line in key_lines: - assert not rpm_key_matches_summary(key_line, TEST_GPG_KEY_NAME) - - -def test_get_supported_postgres_versions(): - postgres_release_versions_10_0_0 = get_supported_postgres_release_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.0.0" - ) - assert postgres_release_versions_10_0_0 == (["11", "12", "13"]) - - postgres_release_versions_9_2_1 = get_supported_postgres_release_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "9.2.1" - ) - assert postgres_release_versions_9_2_1 == (["11", "12"]) - - postgres_release_versions_10_1_1 = get_supported_postgres_release_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.1.1" - ) - assert postgres_release_versions_10_1_1 == (["12", "13"]) - - postgres_release_versions_7_2_1 = get_supported_postgres_release_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "7.2.1" - ) - assert postgres_release_versions_7_2_1 == (["10", "11"]) - - postgres_release_versions_10_2_0 = get_supported_postgres_release_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.2.0" - ) - assert postgres_release_versions_10_2_0 == (["12", "13", 
"14"]) - - postgres_release_versions_10_2_1 = get_supported_postgres_release_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.2.1" - ) - assert postgres_release_versions_10_2_1 == (["12", "13", "14"]) - - postgres_nightly_versions_10_2_1 = get_supported_postgres_nightly_versions( - f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml" - ) - assert postgres_nightly_versions_10_2_1 == (["12", "13", "14"]) +import os +import uuid +from datetime import datetime +from shutil import copyfile + +import pathlib2 +from github import Github + +from .test_utils import generate_new_gpg_key +from ..common_tool_methods import ( + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, + append_line_in_file, + define_rpm_public_key_to_machine, + delete_all_gpg_keys_by_name, + delete_rpm_key_by_name, + filter_prs_by_label, + find_nth_matching_line_and_line_number, + find_nth_occurrence_position, + get_current_branch, + get_gpg_fingerprints_by_name, + get_last_commit_message, + get_minor_version, + get_patch_version_regex, + get_project_version_from_tag_name, + get_prs_for_patch_release, + get_supported_postgres_release_versions, + get_supported_postgres_nightly_versions, + get_upcoming_minor_version, + get_version_details, + is_major_release, + is_tag_on_branch, + local_branch_exists, + prepend_line_in_file, + process_template_file, + remote_branch_exists, + remove_prefix, + remove_text_with_parenthesis, + replace_line_in_file, + rpm_key_matches_summary, + run, + run_with_output, + str_array_to_str, +) + +GITHUB_TOKEN = os.getenv("GH_TOKEN") +BASE_PATH = pathlib2.Path(__file__).parents[1] +TEST_BASE_PATH = pathlib2.Path(__file__).parent.absolute() +TEST_GPG_KEY_NAME = "Citus Data " + + +def test_find_nth_occurrence_position(): + assert find_nth_occurrence_position("foofoo foofoo", "foofoo", 2) == 7 + + +def test_find_nth_matching_line_number_by_regex(): + assert ( + find_nth_matching_line_and_line_number( + "citusx\n citusx\ncitusx", "^citusx$", 2 + )[0] + == 2 + ) + assert ( + find_nth_matching_line_and_line_number( + "citusx\n citusx\ncitusx", "^citusy$", 2 + )[0] + == -1 + ) + + +def test_is_major_release(): + assert is_major_release("10.0.0") + assert not is_major_release("10.0.1") + + +def test_get_project_version_from_tag_name(): + tag_name = "v10.0.3" + assert get_project_version_from_tag_name(tag_name) == "10.0.3" + + +def test_str_array_to_str(): + assert str_array_to_str(["1", "2", "3", "4"]) == "1\n2\n3\n4\n" + + +def test_run(): + result = run("echo 'Run' method is performing fine ") + assert result.returncode == 0 + + +def test_remove_paranthesis_from_string(): + assert ( + remove_text_with_parenthesis("out of paranthesis (inside paranthesis)") + == "out of paranthesis " + ) + + +def test_get_version_details(): + assert get_version_details("10.0.1") == { + "major": "10", + "minor": "0", + "patch": "1", + "stage": "stable", + } + + +def test_is_tag_on_branch(): + current_branch = get_current_branch(os.getcwd()) + run("git checkout develop") + assert is_tag_on_branch("v0.8.3", "develop") + assert not is_tag_on_branch("v1.8.3", "develop") + run(f"git checkout {current_branch}") + + +def test_replace_line_in_file(): + replace_str = "Summary: Replace Test" + copy_file_path = f"{TEST_BASE_PATH}/files/citus_copy.spec" + copyfile(f"{TEST_BASE_PATH}/files/citus.spec", copy_file_path) + replace_line_in_file(copy_file_path, r"^Summary: *", replace_str) + try: + with open( + copy_file_path, + "r", + 
encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[6] == replace_str + finally: + os.remove(copy_file_path) + + +def test_get_upcoming_minor_version(): + assert get_upcoming_minor_version("10.1.0") == "10.2" + + +def test_get_last_commit_message(): + current_branch_name = get_current_branch(os.getcwd()) + test_branch_name = f"test{uuid.uuid4()}" + run(f"git checkout -b {test_branch_name}") + try: + with open( + test_branch_name, + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write("Test content") + run("git add .") + commit_message = f"Test message for {test_branch_name}" + run(f"git commit -m '{commit_message}'") + assert get_last_commit_message(os.getcwd()) == f"{commit_message}\n" + finally: + run(f"git checkout {current_branch_name}") + run(f"git branch -D {test_branch_name}") + + +def test_local_branch_exist(): + run("git fetch") + current_branch_name = get_current_branch(os.getcwd()) + branch_name = "develop-local-test" + assert remote_branch_exists("develop", os.getcwd()) + assert not remote_branch_exists("develop2", os.getcwd()) + try: + run(f"git checkout -b {branch_name}") + assert local_branch_exists(branch_name, os.getcwd()) + run(f"git checkout {current_branch_name} ") + finally: + run(f"git branch -D {branch_name}") + + assert not remote_branch_exists("develop_test", os.getcwd()) + + +def test_remote_branch_exist(): + run("git fetch") + assert remote_branch_exists("develop", os.getcwd()) + assert not remote_branch_exists(f"develop{uuid.uuid4()}", os.getcwd()) + + +def test_get_minor_version(): + assert get_minor_version("10.0.3") == "10.0" + + +def test_get_patch_version_regex(): + assert get_patch_version_regex("10.0.3") == r"^10\.0\.\d{1,3}$" + + +def test_append_line_in_file(): + test_file = "test_append.txt" + try: + with open( + test_file, + "a", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write("Test line 1\n") + writer.write("Test line 2\n") + writer.write("Test line 3\n") + writer.write("Test line 4\n") + writer.write("Test line 5\n") + writer.write("Test line 6\n") + writer.write("Test line 7\n") + writer.write("Test line 8\n") + append_line_in_file(test_file, "^Test line 1", "Test line 1.5") + append_line_in_file(test_file, "^Test line 2", "Test line 2.5") + append_line_in_file(test_file, "^Test line 5", "Test line 5.5") + + with open( + test_file, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.readlines() + assert len(lines) == 11 + assert lines[0] == "Test line 1\n" + assert lines[1] == "Test line 1.5\n" + assert lines[2] == "Test line 2\n" + assert lines[3] == "Test line 2.5\n" + finally: + os.remove(test_file) + + +def test_prepend_line_in_file(): + test_file = "test_prepend.txt" + try: + with open( + test_file, + "a", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write("Test line 1\n") + writer.write("Test line 2\n") + writer.write("Test line 3\n") + writer.write("Test line 4\n") + writer.write("Test line 5\n") + writer.write("Test line 6\n") + writer.write("Test line 7\n") + writer.write("Test line 8\n") + prepend_line_in_file(test_file, "^Test line 1", "Test line 0.5") + prepend_line_in_file(test_file, "^Test line 2", "Test line 1.5") + 
prepend_line_in_file(test_file, "^Test line 5", "Test line 4.5") + + with open( + test_file, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.readlines() + assert len(lines) == 11 + assert lines[0] == "Test line 0.5\n" + assert lines[1] == "Test line 1\n" + assert lines[2] == "Test line 1.5\n" + assert lines[3] == "Test line 2\n" + finally: + os.remove(test_file) + + +def test_getprs(): + # created at is not seen on Github. Should be checked on API result + g = Github(GITHUB_TOKEN) + repository = g.get_repo("citusdata/citus") + prs = get_prs_for_patch_release( + repository, + datetime.strptime("2021.02.26", "%Y.%m.%d"), + "master", + datetime.strptime("2021.03.02", "%Y.%m.%d"), + ) + assert len(prs) == 6 + assert prs[0].number == 4748 + + +def test_getprs_with_backlog_label(): + g = Github(GITHUB_TOKEN) + repository = g.get_repo("citusdata/citus") + prs = get_prs_for_patch_release( + repository, + datetime.strptime("2021.02.20", "%Y.%m.%d"), + "master", + datetime.strptime("2021.02.27", "%Y.%m.%d"), + ) + prs_backlog = filter_prs_by_label(prs, "backport") + assert len(prs_backlog) == 1 + assert prs_backlog[0].number == 4746 + + +def test_process_template_file(): + content = process_template_file( + "10.0.3", + f"{BASE_PATH}/templates", + "docker/alpine/alpine.tmpl.dockerfile", + "13.2", + ) + with open( + f"{TEST_BASE_PATH}/files/verify/expected_alpine_10.0.3.txt", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + expected_content = reader.read() + assert expected_content == content + + +def test_remove_prefix(): + assert remove_prefix("test_prefix", "test_") == "prefix" + assert remove_prefix("test_prefix", "part") == "test_prefix" + + +def test_delete_rpm_key_by_name(): + delete_all_gpg_keys_by_name(TEST_GPG_KEY_NAME) + generate_new_gpg_key(f"{TEST_BASE_PATH}/files/gpg/packaging_with_passphrase.gpg") + fingerprints = get_gpg_fingerprints_by_name(TEST_GPG_KEY_NAME) + assert len(fingerprints) > 0 + define_rpm_public_key_to_machine(fingerprints[0]) + delete_rpm_key_by_name(TEST_GPG_KEY_NAME) + result = run_with_output("rpm -q gpg-pubkey") + output = result.stdout.decode("ascii") + key_lines = output.splitlines() + for key_line in key_lines: + assert not rpm_key_matches_summary(key_line, TEST_GPG_KEY_NAME) + + +def test_get_supported_postgres_versions(): + postgres_release_versions_10_0_0 = get_supported_postgres_release_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.0.0" + ) + assert postgres_release_versions_10_0_0 == (["11", "12", "13"]) + + postgres_release_versions_9_2_1 = get_supported_postgres_release_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "9.2.1" + ) + assert postgres_release_versions_9_2_1 == (["11", "12"]) + + postgres_release_versions_10_1_1 = get_supported_postgres_release_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.1.1" + ) + assert postgres_release_versions_10_1_1 == (["12", "13"]) + + postgres_release_versions_7_2_1 = get_supported_postgres_release_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "7.2.1" + ) + assert postgres_release_versions_7_2_1 == (["10", "11"]) + + postgres_release_versions_10_2_0 = get_supported_postgres_release_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.2.0" + ) + assert postgres_release_versions_10_2_0 == (["12", "13", 
"14"]) + + postgres_release_versions_10_2_1 = get_supported_postgres_release_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml", "10.2.1" + ) + assert postgres_release_versions_10_2_1 == (["12", "13", "14"]) + + postgres_nightly_versions_10_2_1 = get_supported_postgres_nightly_versions( + f"{TEST_BASE_PATH}/files/postgres-matrix/postgres-matrix-success.yml" + ) + assert postgres_nightly_versions_10_2_1 == (["12", "13", "14"]) diff --git a/packaging_automation/tests/test_docker_statistics_collector.py b/packaging_automation/tests/test_docker_statistics_collector.py index d89669f3..98ac57bd 100644 --- a/packaging_automation/tests/test_docker_statistics_collector.py +++ b/packaging_automation/tests/test_docker_statistics_collector.py @@ -1,74 +1,74 @@ -import os -from datetime import datetime, timedelta - -from sqlalchemy import create_engine, text -from sqlalchemy.orm import sessionmaker - -from ..dbconfig import Base, db_connection_string, DbParams -from ..docker_statistics_collector import fetch_and_store_docker_statistics, DockerStats - -DB_USER_NAME = os.getenv("DB_USER_NAME") -DB_PASSWORD = os.getenv("DB_PASSWORD") -DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") -DB_NAME = os.getenv("DB_NAME") - - -def test_docker_statistics_collector(): - test_day_shift_index = -2 - test_pull_count_shift = 205 - db_params = DbParams( - user_name=DB_USER_NAME, - password=DB_PASSWORD, - host_and_port=DB_HOST_AND_PORT, - db_name=DB_NAME, - ) - db = create_engine(db_connection_string(db_params=db_params, is_test=True)) - sql = text(f"DROP TABLE IF EXISTS {DockerStats.__tablename__}") - conn = db.connect() - conn.execute(sql) - conn.commit() - conn.close() - Session = sessionmaker(db) - session = Session() - fetch_and_store_docker_statistics( - "citus", - db_parameters=db_params, - is_test=True, - test_day_shift_index=test_day_shift_index, - ) - first_day = datetime.today() + timedelta(days=test_day_shift_index) - first_day_record = ( - session.query(DockerStats).filter_by(stat_date=first_day.date()).first() - ) - fetch_and_store_docker_statistics( - "citus", - db_parameters=db_params, - is_test=True, - test_total_pull_count=first_day_record.total_pull_count + test_pull_count_shift, - ) - Base.metadata.create_all(db) - - second_day = datetime.today() + timedelta(days=test_day_shift_index + 1) - third_day = datetime.today() - - second_day_record = ( - session.query(DockerStats).filter_by(stat_date=second_day.date()).first() - ) - third_day_record = ( - session.query(DockerStats).filter_by(stat_date=third_day.date()).first() - ) - - pull_count_diff = ( - third_day_record.total_pull_count - first_day_record.total_pull_count - ) - - assert ( - third_day_record - and second_day_record - and (third_day_record.total_pull_count == second_day_record.total_pull_count) - and ( - third_day_record.daily_pull_count + second_day_record.daily_pull_count - == pull_count_diff - ) - and (pull_count_diff == test_pull_count_shift) - ) +import os +from datetime import datetime, timedelta + +from sqlalchemy import create_engine, text +from sqlalchemy.orm import sessionmaker + +from ..dbconfig import Base, db_connection_string, DbParams +from ..docker_statistics_collector import fetch_and_store_docker_statistics, DockerStats + +DB_USER_NAME = os.getenv("DB_USER_NAME") +DB_PASSWORD = os.getenv("DB_PASSWORD") +DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") +DB_NAME = os.getenv("DB_NAME") + + +def test_docker_statistics_collector(): + test_day_shift_index = -2 + test_pull_count_shift = 205 + 
db_params = DbParams( + user_name=DB_USER_NAME, + password=DB_PASSWORD, + host_and_port=DB_HOST_AND_PORT, + db_name=DB_NAME, + ) + db = create_engine(db_connection_string(db_params=db_params, is_test=True)) + sql = text(f"DROP TABLE IF EXISTS {DockerStats.__tablename__}") + conn = db.connect() + conn.execute(sql) + conn.commit() + conn.close() + Session = sessionmaker(db) + session = Session() + fetch_and_store_docker_statistics( + "citus", + db_parameters=db_params, + is_test=True, + test_day_shift_index=test_day_shift_index, + ) + first_day = datetime.today() + timedelta(days=test_day_shift_index) + first_day_record = ( + session.query(DockerStats).filter_by(stat_date=first_day.date()).first() + ) + fetch_and_store_docker_statistics( + "citus", + db_parameters=db_params, + is_test=True, + test_total_pull_count=first_day_record.total_pull_count + test_pull_count_shift, + ) + Base.metadata.create_all(db) + + second_day = datetime.today() + timedelta(days=test_day_shift_index + 1) + third_day = datetime.today() + + second_day_record = ( + session.query(DockerStats).filter_by(stat_date=second_day.date()).first() + ) + third_day_record = ( + session.query(DockerStats).filter_by(stat_date=third_day.date()).first() + ) + + pull_count_diff = ( + third_day_record.total_pull_count - first_day_record.total_pull_count + ) + + assert ( + third_day_record + and second_day_record + and (third_day_record.total_pull_count == second_day_record.total_pull_count) + and ( + third_day_record.daily_pull_count + second_day_record.daily_pull_count + == pull_count_diff + ) + and (pull_count_diff == test_pull_count_shift) + ) diff --git a/packaging_automation/tests/test_github_statistics_collector.py b/packaging_automation/tests/test_github_statistics_collector.py index 56bbf2f7..9a0a3e9d 100644 --- a/packaging_automation/tests/test_github_statistics_collector.py +++ b/packaging_automation/tests/test_github_statistics_collector.py @@ -1,88 +1,88 @@ -import os -from datetime import datetime - -from sqlalchemy import text, create_engine - -from ..dbconfig import db_connection_string, DbParams, db_session -from ..github_statistics_collector import ( - fetch_and_store_github_stats, - GithubCloneStatsTransactionsDetail, - GithubCloneStatsTransactionsMain, - GithubCloneStats, - GitHubReleases, -) - -DB_USER_NAME = os.getenv("DB_USER_NAME") -DB_PASSWORD = os.getenv("DB_PASSWORD") -DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") -DB_NAME = os.getenv("DB_NAME") -GH_TOKEN = os.getenv("GH_TOKEN") - -ORGANIZATION_NAME = "citusdata" -REPO_NAME = "citus" - - -def test_github_stats_collector(): - db_params = DbParams( - user_name=DB_USER_NAME, - password=DB_PASSWORD, - host_and_port=DB_HOST_AND_PORT, - db_name=DB_NAME, - ) - db = create_engine(db_connection_string(db_params=db_params, is_test=True)) - conn = db.connect() - conn.execute( - text(f"DROP TABLE IF EXISTS {GithubCloneStatsTransactionsDetail.__tablename__}") - ) - conn.execute( - text(f"DROP TABLE IF EXISTS {GithubCloneStatsTransactionsMain.__tablename__}") - ) - conn.execute(text(f"DROP TABLE IF EXISTS {GithubCloneStats.__tablename__}")) - - conn.commit() - - conn.close() - - fetch_and_store_github_stats( - organization_name=ORGANIZATION_NAME, - repo_name=REPO_NAME, - github_token=GH_TOKEN, - db_parameters=db_params, - is_test=True, - ) - session = db_session(db_params=db_params, is_test=True) - main_records = session.query(GithubCloneStatsTransactionsMain).all() - assert len(main_records) == 1 - detail_records = 
session.query(GithubCloneStatsTransactionsDetail).all() - assert len(detail_records) >= 13 - records = session.query(GithubCloneStats).all() - previous_record_length = len(records) - assert previous_record_length >= 13 - first_record = session.query(GithubCloneStats).first() - session.delete(first_record) - session.commit() - records = session.query(GithubCloneStats).all() - assert previous_record_length - len(records) == 1 - fetch_and_store_github_stats( - organization_name=ORGANIZATION_NAME, - repo_name=REPO_NAME, - github_token=GH_TOKEN, - db_parameters=db_params, - is_test=True, - ) - - main_records = session.query(GithubCloneStatsTransactionsMain).all() - assert len(main_records) == 2 - detail_records = session.query(GithubCloneStatsTransactionsDetail).all() - assert len(detail_records) >= 26 - - records = session.query(GithubCloneStats).all() - assert len(records) == previous_record_length - today_record = session.query(GithubCloneStats).filter_by( - clone_date=datetime.today() - ) - assert not today_record.first() - - release_records = session.query(GitHubReleases).filter_by(tag_name="v10.0.3").all() - - assert len(release_records) > 0 +import os +from datetime import datetime + +from sqlalchemy import text, create_engine + +from ..dbconfig import db_connection_string, DbParams, db_session +from ..github_statistics_collector import ( + fetch_and_store_github_stats, + GithubCloneStatsTransactionsDetail, + GithubCloneStatsTransactionsMain, + GithubCloneStats, + GitHubReleases, +) + +DB_USER_NAME = os.getenv("DB_USER_NAME") +DB_PASSWORD = os.getenv("DB_PASSWORD") +DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") +DB_NAME = os.getenv("DB_NAME") +GH_TOKEN = os.getenv("GH_TOKEN") + +ORGANIZATION_NAME = "citusdata" +REPO_NAME = "citus" + + +def test_github_stats_collector(): + db_params = DbParams( + user_name=DB_USER_NAME, + password=DB_PASSWORD, + host_and_port=DB_HOST_AND_PORT, + db_name=DB_NAME, + ) + db = create_engine(db_connection_string(db_params=db_params, is_test=True)) + conn = db.connect() + conn.execute( + text(f"DROP TABLE IF EXISTS {GithubCloneStatsTransactionsDetail.__tablename__}") + ) + conn.execute( + text(f"DROP TABLE IF EXISTS {GithubCloneStatsTransactionsMain.__tablename__}") + ) + conn.execute(text(f"DROP TABLE IF EXISTS {GithubCloneStats.__tablename__}")) + + conn.commit() + + conn.close() + + fetch_and_store_github_stats( + organization_name=ORGANIZATION_NAME, + repo_name=REPO_NAME, + github_token=GH_TOKEN, + db_parameters=db_params, + is_test=True, + ) + session = db_session(db_params=db_params, is_test=True) + main_records = session.query(GithubCloneStatsTransactionsMain).all() + assert len(main_records) == 1 + detail_records = session.query(GithubCloneStatsTransactionsDetail).all() + assert len(detail_records) >= 13 + records = session.query(GithubCloneStats).all() + previous_record_length = len(records) + assert previous_record_length >= 13 + first_record = session.query(GithubCloneStats).first() + session.delete(first_record) + session.commit() + records = session.query(GithubCloneStats).all() + assert previous_record_length - len(records) == 1 + fetch_and_store_github_stats( + organization_name=ORGANIZATION_NAME, + repo_name=REPO_NAME, + github_token=GH_TOKEN, + db_parameters=db_params, + is_test=True, + ) + + main_records = session.query(GithubCloneStatsTransactionsMain).all() + assert len(main_records) == 2 + detail_records = session.query(GithubCloneStatsTransactionsDetail).all() + assert len(detail_records) >= 26 + + records = 
session.query(GithubCloneStats).all() + assert len(records) == previous_record_length + today_record = session.query(GithubCloneStats).filter_by( + clone_date=datetime.today() + ) + assert not today_record.first() + + release_records = session.query(GitHubReleases).filter_by(tag_name="v10.0.3").all() + + assert len(release_records) > 0 diff --git a/packaging_automation/tests/test_homebrew_statistics_collector.py b/packaging_automation/tests/test_homebrew_statistics_collector.py index 21d7b8a4..d189a5c8 100644 --- a/packaging_automation/tests/test_homebrew_statistics_collector.py +++ b/packaging_automation/tests/test_homebrew_statistics_collector.py @@ -1,38 +1,38 @@ -import os - -from sqlalchemy import text, create_engine - -from ..dbconfig import db_session, DbParams, db_connection_string -from ..homebrew_statistics_collector import fetch_and_save_homebrew_stats, HomebrewStats - -DB_USER_NAME = os.getenv("DB_USER_NAME") -DB_PASSWORD = os.getenv("DB_PASSWORD") -DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") -DB_NAME = os.getenv("DB_NAME") - -db_parameters = DbParams( - user_name=DB_USER_NAME, - password=DB_PASSWORD, - host_and_port=DB_HOST_AND_PORT, - db_name=DB_NAME, -) - - -def test_fetch_and_save_homebrew_stats(): - db = create_engine(db_connection_string(db_params=db_parameters, is_test=True)) - conn = db.connect() - conn.execute(text(f"DROP TABLE IF EXISTS {HomebrewStats.__tablename__}")) - conn.commit() - conn.close() - - session = db_session(db_params=db_parameters, is_test=True) - - fetch_and_save_homebrew_stats(db_params=db_parameters, is_test=True) - - records = session.query(HomebrewStats).all() - assert len(records) == 1 - - fetch_and_save_homebrew_stats(db_params=db_parameters, is_test=True) - - records = session.query(HomebrewStats).all() - assert len(records) == 1 +import os + +from sqlalchemy import text, create_engine + +from ..dbconfig import db_session, DbParams, db_connection_string +from ..homebrew_statistics_collector import fetch_and_save_homebrew_stats, HomebrewStats + +DB_USER_NAME = os.getenv("DB_USER_NAME") +DB_PASSWORD = os.getenv("DB_PASSWORD") +DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") +DB_NAME = os.getenv("DB_NAME") + +db_parameters = DbParams( + user_name=DB_USER_NAME, + password=DB_PASSWORD, + host_and_port=DB_HOST_AND_PORT, + db_name=DB_NAME, +) + + +def test_fetch_and_save_homebrew_stats(): + db = create_engine(db_connection_string(db_params=db_parameters, is_test=True)) + conn = db.connect() + conn.execute(text(f"DROP TABLE IF EXISTS {HomebrewStats.__tablename__}")) + conn.commit() + conn.close() + + session = db_session(db_params=db_parameters, is_test=True) + + fetch_and_save_homebrew_stats(db_params=db_parameters, is_test=True) + + records = session.query(HomebrewStats).all() + assert len(records) == 1 + + fetch_and_save_homebrew_stats(db_params=db_parameters, is_test=True) + + records = session.query(HomebrewStats).all() + assert len(records) == 1 diff --git a/packaging_automation/tests/test_package_cloud_statistics_collector.py b/packaging_automation/tests/test_package_cloud_statistics_collector.py index 8876ba87..29de9896 100644 --- a/packaging_automation/tests/test_package_cloud_statistics_collector.py +++ b/packaging_automation/tests/test_package_cloud_statistics_collector.py @@ -1,90 +1,90 @@ -import json -import os - -from sqlalchemy import text, create_engine - -from ..common_tool_methods import stat_get_request -from ..dbconfig import db_session, DbParams, db_connection_string -from ..package_cloud_statistics_collector import ( - 
fetch_and_save_package_cloud_stats, - PackageCloudRepo, - PackageCloudOrganization, - PackageCloudDownloadStats, - package_list_with_pagination_request_address, - RequestType, - is_ignored_package, - PackageCloudParams, - ParallelExecutionParams, -) - -DB_USER_NAME = os.getenv("DB_USER_NAME") -DB_PASSWORD = os.getenv("DB_PASSWORD") -DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") -DB_NAME = os.getenv("DB_NAME") -PACKAGE_CLOUD_API_TOKEN = os.getenv("PACKAGE_CLOUD_API_TOKEN") -PACKAGE_CLOUD_ADMIN_API_TOKEN = os.getenv("PACKAGE_CLOUD_ADMIN_API_TOKEN") -REPO = PackageCloudRepo.azure -ORGANIZATION = PackageCloudOrganization.citusdata -db_parameters = DbParams( - user_name=DB_USER_NAME, - password=DB_PASSWORD, - host_and_port=DB_HOST_AND_PORT, - db_name=DB_NAME, -) -# 7 Records are fetched for each package from package cloud. To check the record count, we need to multiply package -# count with 7 -PACKAGE_SAVED_HISTORIC_RECORD_COUNT = 7 - -PACKAGE_CLOUD_PARAMETERS = PackageCloudParams( - admin_api_token=PACKAGE_CLOUD_ADMIN_API_TOKEN, - standard_api_token=PACKAGE_CLOUD_API_TOKEN, - organization=ORGANIZATION, - repo_name=REPO, -) - - -def test_fetch_and_save_package_cloud_stats(): - db = create_engine(db_connection_string(db_params=db_parameters, is_test=True)) - conn = db.connect() - conn.execute( - text(f"DROP TABLE IF EXISTS {PackageCloudDownloadStats.__tablename__}") - ) - conn.commit() - conn.close() - - session = db_session(db_params=db_parameters, is_test=True) - page_record_count = 3 - parallel_count = 3 - - for index in range(0, parallel_count): - parallel_exec_parameters = ParallelExecutionParams( - parallel_count=parallel_count, - parallel_exec_index=index, - page_record_count=page_record_count, - ) - fetch_and_save_package_cloud_stats( - db_params=db_parameters, - package_cloud_params=PACKAGE_CLOUD_PARAMETERS, - parallel_execution_params=parallel_exec_parameters, - is_test=True, - save_records_with_download_count_zero=True, - ) - - records = session.query(PackageCloudDownloadStats).all() - - assert len(records) > 0 - - -def get_filtered_package_count(session) -> int: - # Since package count for our test repo is lower than 500, we get the total package details by getting all the - # packages in one call - result = stat_get_request( - package_list_with_pagination_request_address(PACKAGE_CLOUD_PARAMETERS, 1, 500), - RequestType.package_cloud_list_package, - session, - ) - package_info_list = json.loads(result.content) - package_list = list( - filter(lambda p: not is_ignored_package(p["name"]), package_info_list) - ) - return len(package_list) +import json +import os + +from sqlalchemy import text, create_engine + +from ..common_tool_methods import stat_get_request +from ..dbconfig import db_session, DbParams, db_connection_string +from ..package_cloud_statistics_collector import ( + fetch_and_save_package_cloud_stats, + PackageCloudRepo, + PackageCloudOrganization, + PackageCloudDownloadStats, + package_list_with_pagination_request_address, + RequestType, + is_ignored_package, + PackageCloudParams, + ParallelExecutionParams, +) + +DB_USER_NAME = os.getenv("DB_USER_NAME") +DB_PASSWORD = os.getenv("DB_PASSWORD") +DB_HOST_AND_PORT = os.getenv("DB_HOST_AND_PORT") +DB_NAME = os.getenv("DB_NAME") +PACKAGE_CLOUD_API_TOKEN = os.getenv("PACKAGE_CLOUD_API_TOKEN") +PACKAGE_CLOUD_ADMIN_API_TOKEN = os.getenv("PACKAGE_CLOUD_ADMIN_API_TOKEN") +REPO = PackageCloudRepo.azure +ORGANIZATION = PackageCloudOrganization.citusdata +db_parameters = DbParams( + user_name=DB_USER_NAME, + password=DB_PASSWORD, + 
host_and_port=DB_HOST_AND_PORT, + db_name=DB_NAME, +) +# 7 Records are fetched for each package from package cloud. To check the record count, we need to multiply package +# count with 7 +PACKAGE_SAVED_HISTORIC_RECORD_COUNT = 7 + +PACKAGE_CLOUD_PARAMETERS = PackageCloudParams( + admin_api_token=PACKAGE_CLOUD_ADMIN_API_TOKEN, + standard_api_token=PACKAGE_CLOUD_API_TOKEN, + organization=ORGANIZATION, + repo_name=REPO, +) + + +def test_fetch_and_save_package_cloud_stats(): + db = create_engine(db_connection_string(db_params=db_parameters, is_test=True)) + conn = db.connect() + conn.execute( + text(f"DROP TABLE IF EXISTS {PackageCloudDownloadStats.__tablename__}") + ) + conn.commit() + conn.close() + + session = db_session(db_params=db_parameters, is_test=True) + page_record_count = 3 + parallel_count = 3 + + for index in range(0, parallel_count): + parallel_exec_parameters = ParallelExecutionParams( + parallel_count=parallel_count, + parallel_exec_index=index, + page_record_count=page_record_count, + ) + fetch_and_save_package_cloud_stats( + db_params=db_parameters, + package_cloud_params=PACKAGE_CLOUD_PARAMETERS, + parallel_execution_params=parallel_exec_parameters, + is_test=True, + save_records_with_download_count_zero=True, + ) + + records = session.query(PackageCloudDownloadStats).all() + + assert len(records) > 0 + + +def get_filtered_package_count(session) -> int: + # Since package count for our test repo is lower than 500, we get the total package details by getting all the + # packages in one call + result = stat_get_request( + package_list_with_pagination_request_address(PACKAGE_CLOUD_PARAMETERS, 1, 500), + RequestType.package_cloud_list_package, + session, + ) + package_info_list = json.loads(result.content) + package_list = list( + filter(lambda p: not is_ignored_package(p["name"]), package_info_list) + ) + return len(package_list) diff --git a/packaging_automation/tests/test_packaging_warning_handler.py b/packaging_automation/tests/test_packaging_warning_handler.py index 4c4e9bd9..b1054c60 100644 --- a/packaging_automation/tests/test_packaging_warning_handler.py +++ b/packaging_automation/tests/test_packaging_warning_handler.py @@ -1,207 +1,207 @@ -import pathlib2 -import pytest - -from ..common_tool_methods import ( - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, -) -from ..packaging_warning_handler import ( - parse_ignore_lists, - PackageType, - filter_warning_lines, - get_warnings_to_be_raised, - get_error_message, - validate_output, -) - -TEST_BASE_PATH = pathlib2.Path(__file__).parent - - -def test_parse_ignore_lists(): - base_ignore_list, debian_ignore_list = parse_ignore_lists( - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - assert len(base_ignore_list) == 8 and len(debian_ignore_list) == 2 - - base_ignore_list, rpm_ignore_list = parse_ignore_lists( - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - assert len(base_ignore_list) == 8 and len(rpm_ignore_list) == 2 - - -def test_deb_filter_warning_lines(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.read().splitlines() - base_warning_lines, package_specific_warning_lines = filter_warning_lines( - lines, PackageType.deb - ) - assert ( - len(base_warning_lines) == 11 and len(package_specific_warning_lines) == 7 - ) - - -def 
test_rpm_filter_warning_lines(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_rpm.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.read().splitlines() - base_warning_lines, package_specific_warning_lines = filter_warning_lines( - lines, PackageType.rpm - ) - assert ( - len(base_warning_lines) == 10 and len(package_specific_warning_lines) == 1 - ) - - -def test_get_base_warnings_to_be_raised(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.read().splitlines() - base_warning_lines, _ = filter_warning_lines(lines, PackageType.deb) - base_ignore_list, _ = parse_ignore_lists( - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - base_warnings_to_be_raised = get_warnings_to_be_raised( - base_ignore_list, base_warning_lines - ) - assert len(base_warnings_to_be_raised) == 1 - - -def test_get_debian_warnings_to_be_raised(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.read().splitlines() - _, package_specific_warning_lines = filter_warning_lines(lines, PackageType.deb) - _, debian_ignore_list = parse_ignore_lists( - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - debian_warnings_to_be_raised = get_warnings_to_be_raised( - debian_ignore_list, package_specific_warning_lines - ) - assert len(debian_warnings_to_be_raised) == 2 - - -def test_get_error_message(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.read().splitlines() - base_warning_lines, debian_warning_lines = filter_warning_lines( - lines, PackageType.deb - ) - base_ignore_list, debian_ignore_list = parse_ignore_lists( - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - base_warnings_to_be_raised = get_warnings_to_be_raised( - base_ignore_list, base_warning_lines - ) - debian_warnings_to_be_raised = get_warnings_to_be_raised( - debian_ignore_list, debian_warning_lines - ) - error_message = get_error_message( - base_warnings_to_be_raised, debian_warnings_to_be_raised, PackageType.deb - ) - assert ( - error_message - == "Warning lines:\nWarning: Unhandled\nDebian Warning lines:\n" - "citus-enterprise100_11.x86_64: W: invalid-date-format\n" - "citus-enterprise100_11.x86_64: E: zero-length /usr/pgsql-/usr/lib/share/extension/\n" - ) - - -def test_get_error_message_empty_package_specific_errors(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb_only_base.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - lines = reader.read().splitlines() - base_warning_lines, debian_warning_lines = filter_warning_lines( - lines, PackageType.deb - ) - base_ignore_list, debian_ignore_list = parse_ignore_lists( - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - base_warnings_to_be_raised = get_warnings_to_be_raised( - base_ignore_list, base_warning_lines - 
) - debian_warnings_to_be_raised = get_warnings_to_be_raised( - debian_ignore_list, debian_warning_lines - ) - error_message = get_error_message( - base_warnings_to_be_raised, debian_warnings_to_be_raised, PackageType.deb - ) - assert error_message == "Warning lines:\nWarning: Unhandled\n" - - -def test_validate_output_deb(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - output = reader.read() - with pytest.raises(SystemExit): - validate_output( - output, - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.deb, - ) - - -def test_validate_output_rpm(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_rpm.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - output = reader.read() - with pytest.raises(SystemExit): - validate_output( - output, - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore_without_rpm_rules.yml", - PackageType.rpm, - ) - - -def test_validate_output_rpm_success(): - with open( - f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_rpm_success.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - output = reader.read() - validate_output( - output, - f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", - PackageType.rpm, - ) +import pathlib2 +import pytest + +from ..common_tool_methods import ( + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, +) +from ..packaging_warning_handler import ( + parse_ignore_lists, + PackageType, + filter_warning_lines, + get_warnings_to_be_raised, + get_error_message, + validate_output, +) + +TEST_BASE_PATH = pathlib2.Path(__file__).parent + + +def test_parse_ignore_lists(): + base_ignore_list, debian_ignore_list = parse_ignore_lists( + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + assert len(base_ignore_list) == 8 and len(debian_ignore_list) == 2 + + base_ignore_list, rpm_ignore_list = parse_ignore_lists( + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + assert len(base_ignore_list) == 8 and len(rpm_ignore_list) == 2 + + +def test_deb_filter_warning_lines(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.read().splitlines() + base_warning_lines, package_specific_warning_lines = filter_warning_lines( + lines, PackageType.deb + ) + assert ( + len(base_warning_lines) == 11 and len(package_specific_warning_lines) == 7 + ) + + +def test_rpm_filter_warning_lines(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_rpm.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.read().splitlines() + base_warning_lines, package_specific_warning_lines = filter_warning_lines( + lines, PackageType.rpm + ) + assert ( + len(base_warning_lines) == 10 and len(package_specific_warning_lines) == 1 + ) + + +def test_get_base_warnings_to_be_raised(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", + "r", + 
encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.read().splitlines() + base_warning_lines, _ = filter_warning_lines(lines, PackageType.deb) + base_ignore_list, _ = parse_ignore_lists( + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + base_warnings_to_be_raised = get_warnings_to_be_raised( + base_ignore_list, base_warning_lines + ) + assert len(base_warnings_to_be_raised) == 1 + + +def test_get_debian_warnings_to_be_raised(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.read().splitlines() + _, package_specific_warning_lines = filter_warning_lines(lines, PackageType.deb) + _, debian_ignore_list = parse_ignore_lists( + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + debian_warnings_to_be_raised = get_warnings_to_be_raised( + debian_ignore_list, package_specific_warning_lines + ) + assert len(debian_warnings_to_be_raised) == 2 + + +def test_get_error_message(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.read().splitlines() + base_warning_lines, debian_warning_lines = filter_warning_lines( + lines, PackageType.deb + ) + base_ignore_list, debian_ignore_list = parse_ignore_lists( + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + base_warnings_to_be_raised = get_warnings_to_be_raised( + base_ignore_list, base_warning_lines + ) + debian_warnings_to_be_raised = get_warnings_to_be_raised( + debian_ignore_list, debian_warning_lines + ) + error_message = get_error_message( + base_warnings_to_be_raised, debian_warnings_to_be_raised, PackageType.deb + ) + assert ( + error_message + == "Warning lines:\nWarning: Unhandled\nDebian Warning lines:\n" + "citus-enterprise100_11.x86_64: W: invalid-date-format\n" + "citus-enterprise100_11.x86_64: E: zero-length /usr/pgsql-/usr/lib/share/extension/\n" + ) + + +def test_get_error_message_empty_package_specific_errors(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb_only_base.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + lines = reader.read().splitlines() + base_warning_lines, debian_warning_lines = filter_warning_lines( + lines, PackageType.deb + ) + base_ignore_list, debian_ignore_list = parse_ignore_lists( + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + base_warnings_to_be_raised = get_warnings_to_be_raised( + base_ignore_list, base_warning_lines + ) + debian_warnings_to_be_raised = get_warnings_to_be_raised( + debian_ignore_list, debian_warning_lines + ) + error_message = get_error_message( + base_warnings_to_be_raised, debian_warnings_to_be_raised, PackageType.deb + ) + assert error_message == "Warning lines:\nWarning: Unhandled\n" + + +def test_validate_output_deb(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_deb.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + output = reader.read() + with pytest.raises(SystemExit): + validate_output( + output, + 
f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.deb, + ) + + +def test_validate_output_rpm(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_rpm.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + output = reader.read() + with pytest.raises(SystemExit): + validate_output( + output, + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore_without_rpm_rules.yml", + PackageType.rpm, + ) + + +def test_validate_output_rpm_success(): + with open( + f"{TEST_BASE_PATH}/files/packaging_warning/sample_warning_build_output_rpm_success.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + output = reader.read() + validate_output( + output, + f"{TEST_BASE_PATH}/files/packaging_warning/packaging_ignore.yml", + PackageType.rpm, + ) diff --git a/packaging_automation/tests/test_prepare_release.py b/packaging_automation/tests/test_prepare_release.py index 19a637ba..61e1da98 100644 --- a/packaging_automation/tests/test_prepare_release.py +++ b/packaging_automation/tests/test_prepare_release.py @@ -1,252 +1,252 @@ -import os -import uuid -from datetime import datetime - -import pathlib2 - -from ..common_tool_methods import ( - file_includes_line, - count_line_in_file, - run, - get_last_commit_message, - remove_cloned_code, -) -from ..prepare_release import ( - update_release, - MULTI_EXTENSION_OUT, - MULTI_EXTENSION_SQL, - CONFIGURE, - CONFIGURE_IN, - CITUS_CONTROL, - CONFIG_PY, - ProjectParams, -) - -github_token = os.getenv("GH_TOKEN") - -BASE_PATH = ( - pathlib2.Path(__file__).parents[2] - if os.getenv("BASE_PATH") is None - else os.getenv("BASE_PATH") -) - -MAIN_BRANCH = "test-tools-scripts" -TEST_CHECKOUT_DIR = "citus_test" - -resources_to_be_deleted = [] - - -def initialize_env() -> str: - test_base_path_major = f"{BASE_PATH}/{uuid.uuid4()}" - remove_cloned_code(test_base_path_major) - if not os.path.exists(test_base_path_major): - run(f"git clone https://github.com/citusdata/citus.git {test_base_path_major}") - return test_base_path_major - - -def test_major_release(): - test_base_path_major = initialize_env() - os.chdir(test_base_path_major) - run(f"git checkout {MAIN_BRANCH}") - resources_to_be_deleted.append(test_base_path_major) - - previous_print_extension_changes = count_line_in_file( - test_base_path_major, - MULTI_EXTENSION_OUT, - "SELECT * FROM print_extension_changes();", - ) - - project_params = ProjectParams( - project_version="10.1.0", - project_name="citus", - main_branch=MAIN_BRANCH, - schema_version="", - ) - update_release_return_value = update_release( - github_token=github_token, - project_params=project_params, - earliest_pr_date=datetime.strptime("2021.03.25 00:00", "%Y.%m.%d %H:%M"), - exec_path=test_base_path_major, - is_test=True, - ) - - run(f"git checkout {update_release_return_value.release_branch_name}") - - assert file_includes_line(test_base_path_major, MULTI_EXTENSION_OUT, " 10.1.0") - assert file_includes_line( - test_base_path_major, CONFIGURE_IN, "AC_INIT([Citus], [10.1.0])" - ) - assert file_includes_line( - test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.1.0'" - ) - assert file_includes_line( - test_base_path_major, CONFIGURE, "PACKAGE_STRING='Citus 10.1.0'" - ) - assert file_includes_line( - test_base_path_major, - CONFIGURE, - r"\`configure' configures Citus 10.1.0 to adapt to many kinds of systems.", - ) - assert file_includes_line( - test_base_path_major, - 
CONFIGURE, - ' short | recursive ) echo "Configuration of Citus 10.1.0:";;', - ) - assert file_includes_line( - test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.1.0'" - ) - assert ( - get_last_commit_message(test_base_path_major) - == "Bump citus version to 10.1.0\n" - ) - - run(f"git checkout {update_release_return_value.upcoming_version_branch}") - - assert file_includes_line( - test_base_path_major, CITUS_CONTROL, "default_version = '10.2-1'" - ) - assert file_includes_line( - test_base_path_major, - MULTI_EXTENSION_OUT, - "-- Test downgrade to 10.1-1 from 10.2-1", - ) - assert file_includes_line( - test_base_path_major, - MULTI_EXTENSION_OUT, - "ALTER EXTENSION citus UPDATE TO '10.1-1';", - ) - assert ( - count_line_in_file( - test_base_path_major, - MULTI_EXTENSION_OUT, - "ALTER EXTENSION citus UPDATE TO '10.2-1';", - ) - == 2 - ) - assert file_includes_line( - test_base_path_major, - MULTI_EXTENSION_OUT, - "-- Should be empty result since upgrade+downgrade should be a no-op", - ) - assert ( - count_line_in_file( - test_base_path_major, - MULTI_EXTENSION_OUT, - "SELECT * FROM print_extension_changes();", - ) - - previous_print_extension_changes - == 2 - ) - assert file_includes_line( - test_base_path_major, MULTI_EXTENSION_OUT, "-- Snapshot of state at 10.2-1" - ) - assert file_includes_line(test_base_path_major, MULTI_EXTENSION_OUT, " 10.2devel") - - assert ( - count_line_in_file( - test_base_path_major, - MULTI_EXTENSION_SQL, - "ALTER EXTENSION citus UPDATE TO '10.2-1';", - ) - == 2 - ) - assert file_includes_line( - test_base_path_major, CONFIG_PY, "MASTER_VERSION = '10.2'" - ) - assert file_includes_line( - test_base_path_major, CONFIGURE_IN, "AC_INIT([Citus], [10.2devel])" - ) - assert file_includes_line( - test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.2devel'" - ) - assert file_includes_line( - test_base_path_major, CONFIGURE, "PACKAGE_STRING='Citus 10.2devel'" - ) - assert file_includes_line( - test_base_path_major, - CONFIGURE, - r"\`configure' configures Citus 10.2devel to adapt to many kinds of systems.", - ) - assert file_includes_line( - test_base_path_major, - CONFIGURE, - ' short | recursive ) echo "Configuration of Citus 10.2devel:";;', - ) - assert file_includes_line( - test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.2devel'" - ) - assert os.path.exists( - f"{test_base_path_major}/{update_release_return_value.upgrade_path_sql_file}" - ) - assert os.path.exists( - f"{test_base_path_major}/{update_release_return_value.downgrade_path_sql_file}" - ) - assert ( - get_last_commit_message(test_base_path_major) - == "Bump citus version to 10.2devel\n" - ) - run(f"git checkout {MAIN_BRANCH}") - - -def test_patch_release(): - test_base_path_patch = initialize_env() - resources_to_be_deleted.append(test_base_path_patch) - os.chdir(test_base_path_patch) - project_params = ProjectParams( - project_version="10.0.4", - project_name="citus", - main_branch=MAIN_BRANCH, - schema_version="10.1-5", - ) - try: - update_release( - github_token=github_token, - project_params=project_params, - earliest_pr_date=datetime.strptime("2021.03.25 00:00", "%Y.%m.%d %H:%M"), - exec_path=test_base_path_patch, - is_test=True, - ) - assert file_includes_line( - test_base_path_patch, - MULTI_EXTENSION_OUT, - f" {project_params.project_version}", - ) - assert file_includes_line( - test_base_path_patch, - CONFIGURE_IN, - f"AC_INIT([Citus], [{project_params.project_version}])", - ) - assert file_includes_line( - test_base_path_patch, - CONFIGURE, - 
f"PACKAGE_VERSION='{project_params.project_version}'", - ) - assert file_includes_line( - test_base_path_patch, - CONFIGURE, - f"PACKAGE_STRING='Citus {project_params.project_version}'", - ) - assert file_includes_line( - test_base_path_patch, - CONFIGURE, - rf"\`configure' configures Citus {project_params.project_version} to adapt to many kinds of systems.", - ) - assert file_includes_line( - test_base_path_patch, - CONFIGURE, - f' short | recursive ) echo "Configuration of Citus {project_params.project_version}:";;', - ) - assert file_includes_line( - test_base_path_patch, - CONFIGURE, - f"PACKAGE_VERSION='{project_params.project_version}'", - ) - assert file_includes_line( - test_base_path_patch, - CITUS_CONTROL, - f"default_version = '{project_params.schema_version}'", - ) - run(f"git checkout {MAIN_BRANCH}") - finally: - for path in resources_to_be_deleted: - remove_cloned_code(path) +import os +import uuid +from datetime import datetime + +import pathlib2 + +from ..common_tool_methods import ( + file_includes_line, + count_line_in_file, + run, + get_last_commit_message, + remove_cloned_code, +) +from ..prepare_release import ( + update_release, + MULTI_EXTENSION_OUT, + MULTI_EXTENSION_SQL, + CONFIGURE, + CONFIGURE_IN, + CITUS_CONTROL, + CONFIG_PY, + ProjectParams, +) + +github_token = os.getenv("GH_TOKEN") + +BASE_PATH = ( + pathlib2.Path(__file__).parents[2] + if os.getenv("BASE_PATH") is None + else os.getenv("BASE_PATH") +) + +MAIN_BRANCH = "test-tools-scripts" +TEST_CHECKOUT_DIR = "citus_test" + +resources_to_be_deleted = [] + + +def initialize_env() -> str: + test_base_path_major = f"{BASE_PATH}/{uuid.uuid4()}" + remove_cloned_code(test_base_path_major) + if not os.path.exists(test_base_path_major): + run(f"git clone https://github.com/citusdata/citus.git {test_base_path_major}") + return test_base_path_major + + +def test_major_release(): + test_base_path_major = initialize_env() + os.chdir(test_base_path_major) + run(f"git checkout {MAIN_BRANCH}") + resources_to_be_deleted.append(test_base_path_major) + + previous_print_extension_changes = count_line_in_file( + test_base_path_major, + MULTI_EXTENSION_OUT, + "SELECT * FROM print_extension_changes();", + ) + + project_params = ProjectParams( + project_version="10.1.0", + project_name="citus", + main_branch=MAIN_BRANCH, + schema_version="", + ) + update_release_return_value = update_release( + github_token=github_token, + project_params=project_params, + earliest_pr_date=datetime.strptime("2021.03.25 00:00", "%Y.%m.%d %H:%M"), + exec_path=test_base_path_major, + is_test=True, + ) + + run(f"git checkout {update_release_return_value.release_branch_name}") + + assert file_includes_line(test_base_path_major, MULTI_EXTENSION_OUT, " 10.1.0") + assert file_includes_line( + test_base_path_major, CONFIGURE_IN, "AC_INIT([Citus], [10.1.0])" + ) + assert file_includes_line( + test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.1.0'" + ) + assert file_includes_line( + test_base_path_major, CONFIGURE, "PACKAGE_STRING='Citus 10.1.0'" + ) + assert file_includes_line( + test_base_path_major, + CONFIGURE, + r"\`configure' configures Citus 10.1.0 to adapt to many kinds of systems.", + ) + assert file_includes_line( + test_base_path_major, + CONFIGURE, + ' short | recursive ) echo "Configuration of Citus 10.1.0:";;', + ) + assert file_includes_line( + test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.1.0'" + ) + assert ( + get_last_commit_message(test_base_path_major) + == "Bump citus version to 10.1.0\n" + ) + + run(f"git checkout 
{update_release_return_value.upcoming_version_branch}") + + assert file_includes_line( + test_base_path_major, CITUS_CONTROL, "default_version = '10.2-1'" + ) + assert file_includes_line( + test_base_path_major, + MULTI_EXTENSION_OUT, + "-- Test downgrade to 10.1-1 from 10.2-1", + ) + assert file_includes_line( + test_base_path_major, + MULTI_EXTENSION_OUT, + "ALTER EXTENSION citus UPDATE TO '10.1-1';", + ) + assert ( + count_line_in_file( + test_base_path_major, + MULTI_EXTENSION_OUT, + "ALTER EXTENSION citus UPDATE TO '10.2-1';", + ) + == 2 + ) + assert file_includes_line( + test_base_path_major, + MULTI_EXTENSION_OUT, + "-- Should be empty result since upgrade+downgrade should be a no-op", + ) + assert ( + count_line_in_file( + test_base_path_major, + MULTI_EXTENSION_OUT, + "SELECT * FROM print_extension_changes();", + ) + - previous_print_extension_changes + == 2 + ) + assert file_includes_line( + test_base_path_major, MULTI_EXTENSION_OUT, "-- Snapshot of state at 10.2-1" + ) + assert file_includes_line(test_base_path_major, MULTI_EXTENSION_OUT, " 10.2devel") + + assert ( + count_line_in_file( + test_base_path_major, + MULTI_EXTENSION_SQL, + "ALTER EXTENSION citus UPDATE TO '10.2-1';", + ) + == 2 + ) + assert file_includes_line( + test_base_path_major, CONFIG_PY, "MASTER_VERSION = '10.2'" + ) + assert file_includes_line( + test_base_path_major, CONFIGURE_IN, "AC_INIT([Citus], [10.2devel])" + ) + assert file_includes_line( + test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.2devel'" + ) + assert file_includes_line( + test_base_path_major, CONFIGURE, "PACKAGE_STRING='Citus 10.2devel'" + ) + assert file_includes_line( + test_base_path_major, + CONFIGURE, + r"\`configure' configures Citus 10.2devel to adapt to many kinds of systems.", + ) + assert file_includes_line( + test_base_path_major, + CONFIGURE, + ' short | recursive ) echo "Configuration of Citus 10.2devel:";;', + ) + assert file_includes_line( + test_base_path_major, CONFIGURE, "PACKAGE_VERSION='10.2devel'" + ) + assert os.path.exists( + f"{test_base_path_major}/{update_release_return_value.upgrade_path_sql_file}" + ) + assert os.path.exists( + f"{test_base_path_major}/{update_release_return_value.downgrade_path_sql_file}" + ) + assert ( + get_last_commit_message(test_base_path_major) + == "Bump citus version to 10.2devel\n" + ) + run(f"git checkout {MAIN_BRANCH}") + + +def test_patch_release(): + test_base_path_patch = initialize_env() + resources_to_be_deleted.append(test_base_path_patch) + os.chdir(test_base_path_patch) + project_params = ProjectParams( + project_version="10.0.4", + project_name="citus", + main_branch=MAIN_BRANCH, + schema_version="10.1-5", + ) + try: + update_release( + github_token=github_token, + project_params=project_params, + earliest_pr_date=datetime.strptime("2021.03.25 00:00", "%Y.%m.%d %H:%M"), + exec_path=test_base_path_patch, + is_test=True, + ) + assert file_includes_line( + test_base_path_patch, + MULTI_EXTENSION_OUT, + f" {project_params.project_version}", + ) + assert file_includes_line( + test_base_path_patch, + CONFIGURE_IN, + f"AC_INIT([Citus], [{project_params.project_version}])", + ) + assert file_includes_line( + test_base_path_patch, + CONFIGURE, + f"PACKAGE_VERSION='{project_params.project_version}'", + ) + assert file_includes_line( + test_base_path_patch, + CONFIGURE, + f"PACKAGE_STRING='Citus {project_params.project_version}'", + ) + assert file_includes_line( + test_base_path_patch, + CONFIGURE, + rf"\`configure' configures Citus {project_params.project_version} to adapt to many 
kinds of systems.", + ) + assert file_includes_line( + test_base_path_patch, + CONFIGURE, + f' short | recursive ) echo "Configuration of Citus {project_params.project_version}:";;', + ) + assert file_includes_line( + test_base_path_patch, + CONFIGURE, + f"PACKAGE_VERSION='{project_params.project_version}'", + ) + assert file_includes_line( + test_base_path_patch, + CITUS_CONTROL, + f"default_version = '{project_params.schema_version}'", + ) + run(f"git checkout {MAIN_BRANCH}") + finally: + for path in resources_to_be_deleted: + remove_cloned_code(path) diff --git a/packaging_automation/tests/test_publish_docker.py b/packaging_automation/tests/test_publish_docker.py index 740ff446..400f6276 100644 --- a/packaging_automation/tests/test_publish_docker.py +++ b/packaging_automation/tests/test_publish_docker.py @@ -1,128 +1,128 @@ -import os - -import docker -import pathlib2 -import pytest - -from ..common_tool_methods import remove_prefix, run, run_with_output -from ..publish_docker import ( - decode_triggering_event_info, - GithubTriggerEventSource, - decode_tag_parts, - get_image_tag, - DockerImageType, - publish_main_docker_images, - publish_tagged_docker_images, - publish_nightly_docker_image, -) - -NON_DEFAULT_BRANCH_NAME = "12.0.0_test" -DEFAULT_BRANCH_NAME = "master" -TAG_NAME = "v12.0.0" -INVALID_TAG_NAME = "v12.x" -DOCKER_IMAGE_NAME = "citusdata/citus" -docker_client = docker.from_env() - -BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) -EXEC_PATH = f"{BASE_PATH}/docker" - - -def initialize_env(): - if not os.path.exists("docker"): - run("git clone https://github.com/citusdata/docker.git") - - -def test_decode_triggering_event_info(): - event_source, branch_name = decode_triggering_event_info( - f"refs/heads/{NON_DEFAULT_BRANCH_NAME}" - ) - assert ( - event_source == GithubTriggerEventSource.branch_push - and branch_name == NON_DEFAULT_BRANCH_NAME - ) - - event_source, tag_name = decode_triggering_event_info(f"refs/tags/{TAG_NAME}") - assert event_source == GithubTriggerEventSource.tag_push and tag_name == TAG_NAME - - -def test_decode_tag_parts(): - tag_parts = decode_tag_parts(TAG_NAME) - assert ( - len(tag_parts) == 3 - and tag_parts[0] == "12" - and tag_parts[1] == "0" - and tag_parts[2] == "0" - ) - - with pytest.raises(ValueError): - decode_tag_parts(INVALID_TAG_NAME) - - -def test_get_image_tag(): - image_name = get_image_tag(remove_prefix(TAG_NAME, "v"), DockerImageType.latest) - assert image_name == "12.0.0" - - image_name = get_image_tag( - remove_prefix(TAG_NAME, "v"), DockerImageType.postgres_15 - ) - assert image_name == "12.0.0-pg15" - - -def test_publish_main_docker_images(): - initialize_env() - os.chdir("docker") - - try: - run_with_output("git checkout -b docker-unit-test") - publish_main_docker_images(DockerImageType.latest, False) - docker_client.images.get("citusdata/citus:latest") - finally: - run_with_output("git checkout master") - run_with_output("git branch -D docker-unit-test") - - -def test_publish_tagged_docker_images_latest(): - initialize_env() - os.chdir("docker") - try: - run_with_output("git checkout -b docker-unit-test") - publish_tagged_docker_images(DockerImageType.latest, "v12.0.0", False) - docker_client.images.get("citusdata/citus:12") - docker_client.images.get("citusdata/citus:12.0") - docker_client.images.get("citusdata/citus:12.0.0") - finally: - run_with_output("git checkout master") - run_with_output("git branch -D docker-unit-test") - - -def test_publish_tagged_docker_images_alpine(): - initialize_env() - 
os.chdir("docker") - try: - run_with_output("git checkout -b docker-unit-test") - publish_tagged_docker_images(DockerImageType.alpine, TAG_NAME, False) - docker_client.images.get("citusdata/citus:12-alpine") - docker_client.images.get("citusdata/citus:12.0-alpine") - docker_client.images.get("citusdata/citus:12.0.0-alpine") - finally: - run_with_output("git checkout master") - run_with_output("git branch -D docker-unit-test") - - -def test_publish_nightly_docker_image(): - initialize_env() - os.chdir("docker") - try: - run_with_output("git checkout -b docker-unit-test") - publish_nightly_docker_image(False) - docker_client.images.get("citusdata/citus:nightly") - finally: - run_with_output("git checkout master") - run_with_output("git branch -D docker-unit-test") - - -def clear_env(): - if os.path.exists("../docker"): - os.chdir("..") - run("chmod -R 777 docker") - run("sudo rm -rf docker") +import os + +import docker +import pathlib2 +import pytest + +from ..common_tool_methods import remove_prefix, run, run_with_output +from ..publish_docker import ( + decode_triggering_event_info, + GithubTriggerEventSource, + decode_tag_parts, + get_image_tag, + DockerImageType, + publish_main_docker_images, + publish_tagged_docker_images, + publish_nightly_docker_image, +) + +NON_DEFAULT_BRANCH_NAME = "12.0.0_test" +DEFAULT_BRANCH_NAME = "master" +TAG_NAME = "v12.0.0" +INVALID_TAG_NAME = "v12.x" +DOCKER_IMAGE_NAME = "citusdata/citus" +docker_client = docker.from_env() + +BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) +EXEC_PATH = f"{BASE_PATH}/docker" + + +def initialize_env(): + if not os.path.exists("docker"): + run("git clone https://github.com/citusdata/docker.git") + + +def test_decode_triggering_event_info(): + event_source, branch_name = decode_triggering_event_info( + f"refs/heads/{NON_DEFAULT_BRANCH_NAME}" + ) + assert ( + event_source == GithubTriggerEventSource.branch_push + and branch_name == NON_DEFAULT_BRANCH_NAME + ) + + event_source, tag_name = decode_triggering_event_info(f"refs/tags/{TAG_NAME}") + assert event_source == GithubTriggerEventSource.tag_push and tag_name == TAG_NAME + + +def test_decode_tag_parts(): + tag_parts = decode_tag_parts(TAG_NAME) + assert ( + len(tag_parts) == 3 + and tag_parts[0] == "12" + and tag_parts[1] == "0" + and tag_parts[2] == "0" + ) + + with pytest.raises(ValueError): + decode_tag_parts(INVALID_TAG_NAME) + + +def test_get_image_tag(): + image_name = get_image_tag(remove_prefix(TAG_NAME, "v"), DockerImageType.latest) + assert image_name == "12.0.0" + + image_name = get_image_tag( + remove_prefix(TAG_NAME, "v"), DockerImageType.postgres_15 + ) + assert image_name == "12.0.0-pg15" + + +def test_publish_main_docker_images(): + initialize_env() + os.chdir("docker") + + try: + run_with_output("git checkout -b docker-unit-test") + publish_main_docker_images(DockerImageType.latest, False) + docker_client.images.get("citusdata/citus:latest") + finally: + run_with_output("git checkout master") + run_with_output("git branch -D docker-unit-test") + + +def test_publish_tagged_docker_images_latest(): + initialize_env() + os.chdir("docker") + try: + run_with_output("git checkout -b docker-unit-test") + publish_tagged_docker_images(DockerImageType.latest, "v12.0.0", False) + docker_client.images.get("citusdata/citus:12") + docker_client.images.get("citusdata/citus:12.0") + docker_client.images.get("citusdata/citus:12.0.0") + finally: + run_with_output("git checkout master") + run_with_output("git branch -D docker-unit-test") + + +def 
test_publish_tagged_docker_images_alpine(): + initialize_env() + os.chdir("docker") + try: + run_with_output("git checkout -b docker-unit-test") + publish_tagged_docker_images(DockerImageType.alpine, TAG_NAME, False) + docker_client.images.get("citusdata/citus:12-alpine") + docker_client.images.get("citusdata/citus:12.0-alpine") + docker_client.images.get("citusdata/citus:12.0.0-alpine") + finally: + run_with_output("git checkout master") + run_with_output("git branch -D docker-unit-test") + + +def test_publish_nightly_docker_image(): + initialize_env() + os.chdir("docker") + try: + run_with_output("git checkout -b docker-unit-test") + publish_nightly_docker_image(False) + docker_client.images.get("citusdata/citus:nightly") + finally: + run_with_output("git checkout master") + run_with_output("git branch -D docker-unit-test") + + +def clear_env(): + if os.path.exists("../docker"): + os.chdir("..") + run("chmod -R 777 docker") + run("sudo rm -rf docker") diff --git a/packaging_automation/tests/test_update_docker.py b/packaging_automation/tests/test_update_docker.py index 06069dc6..c66d90c2 100644 --- a/packaging_automation/tests/test_update_docker.py +++ b/packaging_automation/tests/test_update_docker.py @@ -1,181 +1,181 @@ -import os -from datetime import datetime - -import pathlib2 - -from ..common_tool_methods import ( - run, - get_version_details, - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, -) -from dotenv import dotenv_values -from ..update_docker import ( - update_docker_file_for_latest_postgres, - update_regular_docker_compose_file, - update_docker_file_alpine, - update_docker_file_for_postgres15, - update_docker_file_for_postgres14, - update_changelog, -) - -BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) -TEST_BASE_PATH = f"{BASE_PATH}/docker" -PROJECT_VERSION = "12.0.0" - -POSTGRES_15_VERSION = "15.4" -POSTGRES_14_VERSION = "14.9" - -PROJECT_NAME = "citus" -version_details = get_version_details(PROJECT_VERSION) -TEMPLATE_PATH = f"{BASE_PATH}/packaging_automation/templates/docker" -PKGVARS_FILE = f"{TEST_BASE_PATH}/pkgvars" - - -def setup_module(): - if not os.path.exists("docker"): - run("git clone https://github.com/citusdata/docker.git") - - -def teardown_module(): - if os.path.exists("docker"): - run("chmod -R 777 docker") - run("sudo rm -rf docker") - - -def test_update_docker_file_for_latest_postgres(): - update_docker_file_for_latest_postgres( - PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_14_VERSION - ) - with open( - f"{TEST_BASE_PATH}/Dockerfile", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[2].strip() == f"FROM postgres:{POSTGRES_14_VERSION}" - assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" - assert ( - f"postgresql-$PG_MAJOR-{PROJECT_NAME}-" - f"{version_details['major']}.{version_details['minor']}=$CITUS_VERSION" - in lines[21] - ) - assert len(lines) == 42 - - -def test_update_regular_docker_compose_file(): - update_regular_docker_compose_file(PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH) - parameterized_str = f' image: "citusdata/{PROJECT_NAME}:{PROJECT_VERSION}"' - with open( - f"{TEST_BASE_PATH}/docker-compose.yml", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[7] == parameterized_str - assert lines[17] == 
parameterized_str - assert len(lines) == 34 - - -def test_update_docker_file_alpine(): - update_docker_file_alpine( - PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_14_VERSION - ) - with open( - f"{TEST_BASE_PATH}/alpine/Dockerfile", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[2].strip() == f"FROM postgres:{POSTGRES_14_VERSION}-alpine" - assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" - assert len(lines) == 58 - - -def test_update_docker_file_for_postgres14(): - update_docker_file_for_postgres14( - PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_14_VERSION - ) - with open( - f"{TEST_BASE_PATH}/postgres-14/Dockerfile", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[2].strip() == f"FROM postgres:{POSTGRES_14_VERSION}" - assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" - assert ( - f"postgresql-$PG_MAJOR-{PROJECT_NAME}-" - f"{version_details['major']}.{version_details['minor']}=$CITUS_VERSION" - in lines[21] - ) - assert len(lines) == 42 - - -def test_update_docker_file_for_postgres15(): - update_docker_file_for_postgres15( - PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_15_VERSION - ) - with open( - f"{TEST_BASE_PATH}/postgres-15/Dockerfile", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[2].strip() == f"FROM postgres:{POSTGRES_15_VERSION}" - assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" - assert ( - f"postgresql-$PG_MAJOR-{PROJECT_NAME}-" - f"{version_details['major']}.{version_details['minor']}=$CITUS_VERSION" - in lines[21] - ) - assert len(lines) == 42 - - -def test_update_changelog_with_postgres(): - update_changelog(PROJECT_VERSION, TEST_BASE_PATH) - with open( - f"{TEST_BASE_PATH}/CHANGELOG.md", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert ( - lines[0] == f"### citus-docker v{PROJECT_VERSION}.docker " - f"({datetime.strftime(datetime.now(), '%B %d,%Y')}) ###" - ) - assert lines[2] == f"* Bump Citus version to {PROJECT_VERSION}" - - -def test_update_changelog_without_postgres(): - with open( - f"{TEST_BASE_PATH}/CHANGELOG.md", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert ( - lines[0] == f"### citus-docker v{PROJECT_VERSION}.docker " - f"({datetime.strftime(datetime.now(), '%B %d,%Y')}) ###" - ) - assert lines[2] == f"* Bump Citus version to {PROJECT_VERSION}" - - -def test_pkgvar_postgres_version_existence(): - config = dotenv_values(PKGVARS_FILE) - assert config["postgres_15_version"] - assert config["postgres_14_version"] +import os +from datetime import datetime + +import pathlib2 + +from ..common_tool_methods import ( + run, + get_version_details, + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, +) +from dotenv import dotenv_values +from ..update_docker import ( + update_docker_file_for_latest_postgres, + update_regular_docker_compose_file, + update_docker_file_alpine, + update_docker_file_for_postgres15, + update_docker_file_for_postgres14, 
+ update_changelog, +) + +BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) +TEST_BASE_PATH = f"{BASE_PATH}/docker" +PROJECT_VERSION = "12.0.0" + +POSTGRES_15_VERSION = "15.4" +POSTGRES_14_VERSION = "14.9" + +PROJECT_NAME = "citus" +version_details = get_version_details(PROJECT_VERSION) +TEMPLATE_PATH = f"{BASE_PATH}/packaging_automation/templates/docker" +PKGVARS_FILE = f"{TEST_BASE_PATH}/pkgvars" + + +def setup_module(): + if not os.path.exists("docker"): + run("git clone https://github.com/citusdata/docker.git") + + +def teardown_module(): + if os.path.exists("docker"): + run("chmod -R 777 docker") + run("sudo rm -rf docker") + + +def test_update_docker_file_for_latest_postgres(): + update_docker_file_for_latest_postgres( + PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_14_VERSION + ) + with open( + f"{TEST_BASE_PATH}/Dockerfile", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[2].strip() == f"FROM postgres:{POSTGRES_14_VERSION}" + assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" + assert ( + f"postgresql-$PG_MAJOR-{PROJECT_NAME}-" + f"{version_details['major']}.{version_details['minor']}=$CITUS_VERSION" + in lines[21] + ) + assert len(lines) == 42 + + +def test_update_regular_docker_compose_file(): + update_regular_docker_compose_file(PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH) + parameterized_str = f' image: "citusdata/{PROJECT_NAME}:{PROJECT_VERSION}"' + with open( + f"{TEST_BASE_PATH}/docker-compose.yml", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[7] == parameterized_str + assert lines[17] == parameterized_str + assert len(lines) == 34 + + +def test_update_docker_file_alpine(): + update_docker_file_alpine( + PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_14_VERSION + ) + with open( + f"{TEST_BASE_PATH}/alpine/Dockerfile", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[2].strip() == f"FROM postgres:{POSTGRES_14_VERSION}-alpine" + assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" + assert len(lines) == 58 + + +def test_update_docker_file_for_postgres14(): + update_docker_file_for_postgres14( + PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_14_VERSION + ) + with open( + f"{TEST_BASE_PATH}/postgres-14/Dockerfile", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[2].strip() == f"FROM postgres:{POSTGRES_14_VERSION}" + assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" + assert ( + f"postgresql-$PG_MAJOR-{PROJECT_NAME}-" + f"{version_details['major']}.{version_details['minor']}=$CITUS_VERSION" + in lines[21] + ) + assert len(lines) == 42 + + +def test_update_docker_file_for_postgres15(): + update_docker_file_for_postgres15( + PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH, POSTGRES_15_VERSION + ) + with open( + f"{TEST_BASE_PATH}/postgres-15/Dockerfile", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[2].strip() == f"FROM 
postgres:{POSTGRES_15_VERSION}" + assert lines[3].strip() == f"ARG VERSION={PROJECT_VERSION}" + assert ( + f"postgresql-$PG_MAJOR-{PROJECT_NAME}-" + f"{version_details['major']}.{version_details['minor']}=$CITUS_VERSION" + in lines[21] + ) + assert len(lines) == 42 + + +def test_update_changelog_with_postgres(): + update_changelog(PROJECT_VERSION, TEST_BASE_PATH) + with open( + f"{TEST_BASE_PATH}/CHANGELOG.md", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert ( + lines[0] == f"### citus-docker v{PROJECT_VERSION}.docker " + f"({datetime.strftime(datetime.now(), '%B %d,%Y')}) ###" + ) + assert lines[2] == f"* Bump Citus version to {PROJECT_VERSION}" + + +def test_update_changelog_without_postgres(): + with open( + f"{TEST_BASE_PATH}/CHANGELOG.md", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert ( + lines[0] == f"### citus-docker v{PROJECT_VERSION}.docker " + f"({datetime.strftime(datetime.now(), '%B %d,%Y')}) ###" + ) + assert lines[2] == f"* Bump Citus version to {PROJECT_VERSION}" + + +def test_pkgvar_postgres_version_existence(): + config = dotenv_values(PKGVARS_FILE) + assert config["postgres_15_version"] + assert config["postgres_14_version"] diff --git a/packaging_automation/tests/test_update_package_properties.py b/packaging_automation/tests/test_update_package_properties.py index c1bcebed..5ab6cb2c 100644 --- a/packaging_automation/tests/test_update_package_properties.py +++ b/packaging_automation/tests/test_update_package_properties.py @@ -1,237 +1,237 @@ -import os -import re -from datetime import datetime -from shutil import copyfile - -import pathlib2 -import pytest - -from ..common_tool_methods import ( - DEFAULT_UNICODE_ERROR_HANDLER, - DEFAULT_ENCODING_FOR_FILE_HANDLING, -) -from ..update_package_properties import ( - PackagePropertiesParams, - SupportedProject, - debian_changelog_header, - get_rpm_changelog, - prepend_latest_changelog_into_debian_changelog, - spec_file_name, - update_rpm_spec, - update_pkgvars, - update_all_changes, -) -from .test_utils import are_strings_equal - -TEST_BASE_PATH = pathlib2.Path(__file__).parent.absolute() -BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[1]) -GITHUB_TOKEN = os.getenv("GH_TOKEN") -PROJECT_VERSION = os.getenv("PROJECT_VERSION", default="10.2.4") -TAG_NAME = os.getenv("TAG_NAME", default="v10.2.4") -PROJECT_NAME = os.getenv("PROJECT_NAME", default="citus") -MICROSOFT_EMAIL = os.getenv("MICROSOFT_EMAIL", default="gindibay@microsoft.com") -NAME_SURNAME = os.getenv("NAME_SURNAME", default="Gurkan Indibay") -CHANGELOG_DATE_STR = os.getenv("CHANGELOG_DATE", "Tue, 01 Feb 2022 12:00:47 +0000") -CHANGELOG_DATE = datetime.strptime(CHANGELOG_DATE_STR, "%a, %d %b %Y %H:%M:%S %z") - - -def default_changelog_param_for_test(changelog_date): - changelog_param = PackagePropertiesParams( - project=SupportedProject.citus, - project_version=PROJECT_VERSION, - fancy=True, - fancy_version_number=1, - name_surname=NAME_SURNAME, - microsoft_email=MICROSOFT_EMAIL, - changelog_date=changelog_date, - ) - return changelog_param - - -DEFAULT_CHANGELOG_PARAM_FOR_TEST = default_changelog_param_for_test(CHANGELOG_DATE) - - -def test_get_version_number(): - assert DEFAULT_CHANGELOG_PARAM_FOR_TEST.version_number == "10.2.4-1" - - -def test_get_version_number_with_project_name(): - assert 
( - DEFAULT_CHANGELOG_PARAM_FOR_TEST.version_number_with_project_name - == "10.2.4.citus-1" - ) - - -def test_get_debian_changelog_header(): - header = debian_changelog_header(SupportedProject.citus, "10.2.4", True, 2) - assert header == "citus (10.2.4.citus-2) stable; urgency=low" - - -def test_prepend_latest_changelog_into_debian_changelog(): - refer_file_path = f"{TEST_BASE_PATH}/files/debian.changelog.refer" - changelog_file_path = f"{TEST_BASE_PATH}/files/debian.changelog" - copyfile(refer_file_path, changelog_file_path) - - changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) - - try: - prepend_latest_changelog_into_debian_changelog( - changelog_param, changelog_file_path - ) - verify_prepend_debian_changelog(changelog_file_path) - finally: - os.remove(changelog_file_path) - - -def verify_prepend_debian_changelog(changelog_file_path): - with open( - changelog_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - with open( - f"{TEST_BASE_PATH}/files/verify/debian_changelog_with_10.2.4.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - expected_content = reader.read() - assert content == expected_content - - -def test_rpm_changelog(): - changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) - changelog = get_rpm_changelog(changelog_param) - with open( - f"{TEST_BASE_PATH}/files/verify/rpm_latest_changelog_reference.txt", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - assert content == changelog - - -def test_update_rpm_spec(): - project_name = "citus" - spec_file = f"{TEST_BASE_PATH}/files/{spec_file_name(project_name)}" - spec_file_copy = f"{os.getcwd()}/{spec_file_name(project_name)}_copy" - spec_file_reference = f"{TEST_BASE_PATH}/files/citus_include_10_2_4.spec" - templates_path = f"{BASE_PATH}/templates" - copyfile(spec_file, spec_file_copy) - try: - changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) - update_rpm_spec(changelog_param, spec_file, templates_path) - verify_rpm_spec(spec_file_reference, spec_file) - finally: - copyfile(spec_file_copy, spec_file) - os.remove(spec_file_copy) - - -def test_update_rpm_spec_include_10_0_3(): - project_name = "citus" - spec_file = f"{TEST_BASE_PATH}/files/citus_include_10_2_4.spec" - spec_file_copy = f"{os.getcwd()}/{spec_file_name(project_name)}_copy" - templates_path = f"{BASE_PATH}/templates" - copyfile(spec_file, spec_file_copy) - try: - changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) - with pytest.raises(ValueError): - update_rpm_spec(changelog_param, spec_file, templates_path) - finally: - copyfile(spec_file_copy, spec_file) - os.remove(spec_file_copy) - - -def verify_rpm_spec(spec_file_reference, spec_file_for_test): - with open( - spec_file_for_test, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader_test: - with open( - spec_file_reference, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader_reference: - test_str = reader_test.read() - reference_str = reader_reference.read() - are_strings_equal(reference_str, test_str) - - -def test_update_pkg_vars(): - templates_path = f"{BASE_PATH}/templates" - pkgvars_path = f"{TEST_BASE_PATH}/files/pkgvars" - pkgvars_copy_path = f"{pkgvars_path}_copy" - copyfile(pkgvars_path, 
pkgvars_copy_path) - - try: - update_pkgvars( - DEFAULT_CHANGELOG_PARAM_FOR_TEST, templates_path, f"{TEST_BASE_PATH}/files/" - ) - verify_pkgvars(pkgvars_path) - finally: - copyfile(pkgvars_copy_path, pkgvars_path) - os.remove(pkgvars_copy_path) - - -def verify_pkgvars(pkgvars_path): - with open( - pkgvars_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - index = content.find(f"pkglatest={PROJECT_VERSION}.{PROJECT_NAME}-1") - assert index > -1 - - -def test_update_all_changes(): - pkgvars_path = f"{TEST_BASE_PATH}/files/pkgvars" - pkgvars_copy_path = f"{pkgvars_path}_copy" - spec_file = f"{TEST_BASE_PATH}/files/{spec_file_name(PROJECT_NAME)}" - spec_file_copy = f"{spec_file}_copy" - spec_file_reference = f"{TEST_BASE_PATH}/files/{spec_file_name(PROJECT_NAME)}" - - changelog_file_path = f"{TEST_BASE_PATH}/files/debian/changelog" - changelog_file_copy_path = f"{changelog_file_path}_copy" - copyfile(changelog_file_path, changelog_file_copy_path) - copyfile(pkgvars_path, pkgvars_copy_path) - copyfile(spec_file, spec_file_copy) - - try: - package_properties_param = PackagePropertiesParams( - project=DEFAULT_CHANGELOG_PARAM_FOR_TEST.project, - project_version=PROJECT_VERSION, - fancy=True, - fancy_version_number=1, - name_surname=NAME_SURNAME, - microsoft_email=MICROSOFT_EMAIL, - changelog_date=CHANGELOG_DATE, - ) - update_all_changes(package_properties_param, f"{TEST_BASE_PATH}/files") - verify_prepend_debian_changelog(changelog_file_path) - verify_pkgvars(pkgvars_path) - verify_rpm_spec(spec_file_reference, spec_file) - finally: - copyfile(changelog_file_copy_path, changelog_file_path) - copyfile(pkgvars_copy_path, pkgvars_path) - copyfile(spec_file_copy, spec_file) - - os.remove(changelog_file_copy_path) - os.remove(pkgvars_copy_path) - os.remove(spec_file_copy) - - -def test_regex(): - print( - re.match( - r"^### \w+\sv\d+\.\d+\.\d+\s\(\w+\s\d+,\s\d+\)\s###$", - "### citus v10.0.3 (March 16, 2021) ###", - ) - ) +import os +import re +from datetime import datetime +from shutil import copyfile + +import pathlib2 +import pytest + +from ..common_tool_methods import ( + DEFAULT_UNICODE_ERROR_HANDLER, + DEFAULT_ENCODING_FOR_FILE_HANDLING, +) +from ..update_package_properties import ( + PackagePropertiesParams, + SupportedProject, + debian_changelog_header, + get_rpm_changelog, + prepend_latest_changelog_into_debian_changelog, + spec_file_name, + update_rpm_spec, + update_pkgvars, + update_all_changes, +) +from .test_utils import are_strings_equal + +TEST_BASE_PATH = pathlib2.Path(__file__).parent.absolute() +BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[1]) +GITHUB_TOKEN = os.getenv("GH_TOKEN") +PROJECT_VERSION = os.getenv("PROJECT_VERSION", default="10.2.4") +TAG_NAME = os.getenv("TAG_NAME", default="v10.2.4") +PROJECT_NAME = os.getenv("PROJECT_NAME", default="citus") +MICROSOFT_EMAIL = os.getenv("MICROSOFT_EMAIL", default="gindibay@microsoft.com") +NAME_SURNAME = os.getenv("NAME_SURNAME", default="Gurkan Indibay") +CHANGELOG_DATE_STR = os.getenv("CHANGELOG_DATE", "Tue, 01 Feb 2022 12:00:47 +0000") +CHANGELOG_DATE = datetime.strptime(CHANGELOG_DATE_STR, "%a, %d %b %Y %H:%M:%S %z") + + +def default_changelog_param_for_test(changelog_date): + changelog_param = PackagePropertiesParams( + project=SupportedProject.citus, + project_version=PROJECT_VERSION, + fancy=True, + fancy_version_number=1, + name_surname=NAME_SURNAME, + microsoft_email=MICROSOFT_EMAIL, + 
changelog_date=changelog_date, + ) + return changelog_param + + +DEFAULT_CHANGELOG_PARAM_FOR_TEST = default_changelog_param_for_test(CHANGELOG_DATE) + + +def test_get_version_number(): + assert DEFAULT_CHANGELOG_PARAM_FOR_TEST.version_number == "10.2.4-1" + + +def test_get_version_number_with_project_name(): + assert ( + DEFAULT_CHANGELOG_PARAM_FOR_TEST.version_number_with_project_name + == "10.2.4.citus-1" + ) + + +def test_get_debian_changelog_header(): + header = debian_changelog_header(SupportedProject.citus, "10.2.4", True, 2) + assert header == "citus (10.2.4.citus-2) stable; urgency=low" + + +def test_prepend_latest_changelog_into_debian_changelog(): + refer_file_path = f"{TEST_BASE_PATH}/files/debian.changelog.refer" + changelog_file_path = f"{TEST_BASE_PATH}/files/debian.changelog" + copyfile(refer_file_path, changelog_file_path) + + changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) + + try: + prepend_latest_changelog_into_debian_changelog( + changelog_param, changelog_file_path + ) + verify_prepend_debian_changelog(changelog_file_path) + finally: + os.remove(changelog_file_path) + + +def verify_prepend_debian_changelog(changelog_file_path): + with open( + changelog_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + with open( + f"{TEST_BASE_PATH}/files/verify/debian_changelog_with_10.2.4.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + expected_content = reader.read() + assert content == expected_content + + +def test_rpm_changelog(): + changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) + changelog = get_rpm_changelog(changelog_param) + with open( + f"{TEST_BASE_PATH}/files/verify/rpm_latest_changelog_reference.txt", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + assert content == changelog + + +def test_update_rpm_spec(): + project_name = "citus" + spec_file = f"{TEST_BASE_PATH}/files/{spec_file_name(project_name)}" + spec_file_copy = f"{os.getcwd()}/{spec_file_name(project_name)}_copy" + spec_file_reference = f"{TEST_BASE_PATH}/files/citus_include_10_2_4.spec" + templates_path = f"{BASE_PATH}/templates" + copyfile(spec_file, spec_file_copy) + try: + changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) + update_rpm_spec(changelog_param, spec_file, templates_path) + verify_rpm_spec(spec_file_reference, spec_file) + finally: + copyfile(spec_file_copy, spec_file) + os.remove(spec_file_copy) + + +def test_update_rpm_spec_include_10_0_3(): + project_name = "citus" + spec_file = f"{TEST_BASE_PATH}/files/citus_include_10_2_4.spec" + spec_file_copy = f"{os.getcwd()}/{spec_file_name(project_name)}_copy" + templates_path = f"{BASE_PATH}/templates" + copyfile(spec_file, spec_file_copy) + try: + changelog_param = default_changelog_param_for_test(CHANGELOG_DATE) + with pytest.raises(ValueError): + update_rpm_spec(changelog_param, spec_file, templates_path) + finally: + copyfile(spec_file_copy, spec_file) + os.remove(spec_file_copy) + + +def verify_rpm_spec(spec_file_reference, spec_file_for_test): + with open( + spec_file_for_test, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader_test: + with open( + spec_file_reference, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader_reference: 
+ test_str = reader_test.read() + reference_str = reader_reference.read() + are_strings_equal(reference_str, test_str) + + +def test_update_pkg_vars(): + templates_path = f"{BASE_PATH}/templates" + pkgvars_path = f"{TEST_BASE_PATH}/files/pkgvars" + pkgvars_copy_path = f"{pkgvars_path}_copy" + copyfile(pkgvars_path, pkgvars_copy_path) + + try: + update_pkgvars( + DEFAULT_CHANGELOG_PARAM_FOR_TEST, templates_path, f"{TEST_BASE_PATH}/files/" + ) + verify_pkgvars(pkgvars_path) + finally: + copyfile(pkgvars_copy_path, pkgvars_path) + os.remove(pkgvars_copy_path) + + +def verify_pkgvars(pkgvars_path): + with open( + pkgvars_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + index = content.find(f"pkglatest={PROJECT_VERSION}.{PROJECT_NAME}-1") + assert index > -1 + + +def test_update_all_changes(): + pkgvars_path = f"{TEST_BASE_PATH}/files/pkgvars" + pkgvars_copy_path = f"{pkgvars_path}_copy" + spec_file = f"{TEST_BASE_PATH}/files/{spec_file_name(PROJECT_NAME)}" + spec_file_copy = f"{spec_file}_copy" + spec_file_reference = f"{TEST_BASE_PATH}/files/{spec_file_name(PROJECT_NAME)}" + + changelog_file_path = f"{TEST_BASE_PATH}/files/debian/changelog" + changelog_file_copy_path = f"{changelog_file_path}_copy" + copyfile(changelog_file_path, changelog_file_copy_path) + copyfile(pkgvars_path, pkgvars_copy_path) + copyfile(spec_file, spec_file_copy) + + try: + package_properties_param = PackagePropertiesParams( + project=DEFAULT_CHANGELOG_PARAM_FOR_TEST.project, + project_version=PROJECT_VERSION, + fancy=True, + fancy_version_number=1, + name_surname=NAME_SURNAME, + microsoft_email=MICROSOFT_EMAIL, + changelog_date=CHANGELOG_DATE, + ) + update_all_changes(package_properties_param, f"{TEST_BASE_PATH}/files") + verify_prepend_debian_changelog(changelog_file_path) + verify_pkgvars(pkgvars_path) + verify_rpm_spec(spec_file_reference, spec_file) + finally: + copyfile(changelog_file_copy_path, changelog_file_path) + copyfile(pkgvars_copy_path, pkgvars_path) + copyfile(spec_file_copy, spec_file) + + os.remove(changelog_file_copy_path) + os.remove(pkgvars_copy_path) + os.remove(spec_file_copy) + + +def test_regex(): + print( + re.match( + r"^### \w+\sv\d+\.\d+\.\d+\s\(\w+\s\d+,\s\d+\)\s###$", + "### citus v10.0.3 (March 16, 2021) ###", + ) + ) diff --git a/packaging_automation/tests/test_update_pgxn.py b/packaging_automation/tests/test_update_pgxn.py index 572fc5c4..98d085d1 100644 --- a/packaging_automation/tests/test_update_pgxn.py +++ b/packaging_automation/tests/test_update_pgxn.py @@ -1,57 +1,57 @@ -import os - -import pathlib2 - -from ..common_tool_methods import ( - run, - DEFAULT_UNICODE_ERROR_HANDLER, - DEFAULT_ENCODING_FOR_FILE_HANDLING, -) -from ..update_pgxn import update_meta_json, update_pkgvars - -BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) -TEST_BASE_PATH = f"{BASE_PATH}/packaging_test" -PROJECT_VERSION = "10.0.3" -PROJECT_NAME = "citus" -TEMPLATE_PATH = f"{BASE_PATH}/packaging_automation/templates/pgxn" - - -def setup_module(): - if not os.path.exists("packaging_test"): - run( - "git clone --branch pgxn-citus https://github.com/citusdata/packaging.git packaging_test" - ) - - -def teardown_module(): - if os.path.exists("packaging_test"): - run("rm -r packaging_test") - - -def test_update_meta_json(): - update_meta_json(PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH) - with open( - f"{TEST_BASE_PATH}/META.json", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - 
errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[4] == f' "version": "{PROJECT_VERSION}",' - assert lines[12] == f' "version": "{PROJECT_VERSION}"' - assert len(lines) == 54 - - -def test_update_pkgvars(): - update_pkgvars(PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH) - with open( - f"{TEST_BASE_PATH}/pkgvars", - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - content = reader.read() - lines = content.splitlines() - assert lines[2] == f"pkglatest={PROJECT_VERSION}" - assert len(lines) == 3 +import os + +import pathlib2 + +from ..common_tool_methods import ( + run, + DEFAULT_UNICODE_ERROR_HANDLER, + DEFAULT_ENCODING_FOR_FILE_HANDLING, +) +from ..update_pgxn import update_meta_json, update_pkgvars + +BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) +TEST_BASE_PATH = f"{BASE_PATH}/packaging_test" +PROJECT_VERSION = "10.0.3" +PROJECT_NAME = "citus" +TEMPLATE_PATH = f"{BASE_PATH}/packaging_automation/templates/pgxn" + + +def setup_module(): + if not os.path.exists("packaging_test"): + run( + "git clone --branch pgxn-citus https://github.com/citusdata/packaging.git packaging_test" + ) + + +def teardown_module(): + if os.path.exists("packaging_test"): + run("rm -r packaging_test") + + +def test_update_meta_json(): + update_meta_json(PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH) + with open( + f"{TEST_BASE_PATH}/META.json", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[4] == f' "version": "{PROJECT_VERSION}",' + assert lines[12] == f' "version": "{PROJECT_VERSION}"' + assert len(lines) == 54 + + +def test_update_pkgvars(): + update_pkgvars(PROJECT_VERSION, TEMPLATE_PATH, TEST_BASE_PATH) + with open( + f"{TEST_BASE_PATH}/pkgvars", + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + content = reader.read() + lines = content.splitlines() + assert lines[2] == f"pkglatest={PROJECT_VERSION}" + assert len(lines) == 3 diff --git a/packaging_automation/tests/test_utils.py b/packaging_automation/tests/test_utils.py index faac200d..cd30faaa 100644 --- a/packaging_automation/tests/test_utils.py +++ b/packaging_automation/tests/test_utils.py @@ -1,31 +1,31 @@ -import difflib -import os - -import pathlib2 - -from ..common_tool_methods import run - -TEST_BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) - - -def are_strings_equal(expected_string: str, actual_str: str) -> bool: - output_list = [ - li for li in difflib.ndiff(expected_string, actual_str) if li[0] != " " - ] - - for output in output_list: - if not (output.strip() == "+" or output.strip() == "-"): - raise ValueError( - f"Actual and expected string are not same Diff:{''.join(output_list)} " - ) - return True - - -def generate_new_gpg_key(gpg_file_name: str): - run(f"gpg --batch --generate-key {gpg_file_name}") - - -def generate_new_gpg_key_with_password(): - run( - f"gpg --batch --generate-key {TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" - ) +import difflib +import os + +import pathlib2 + +from ..common_tool_methods import run + +TEST_BASE_PATH = os.getenv("BASE_PATH", default=pathlib2.Path(__file__).parents[2]) + + +def are_strings_equal(expected_string: str, actual_str: str) -> bool: + output_list = [ + li for li in 
difflib.ndiff(expected_string, actual_str) if li[0] != " " + ] + + for output in output_list: + if not (output.strip() == "+" or output.strip() == "-"): + raise ValueError( + f"Actual and expected string are not same Diff:{''.join(output_list)} " + ) + return True + + +def generate_new_gpg_key(gpg_file_name: str): + run(f"gpg --batch --generate-key {gpg_file_name}") + + +def generate_new_gpg_key_with_password(): + run( + f"gpg --batch --generate-key {TEST_BASE_PATH}/packaging_automation/tests/files/gpg/packaging_with_passphrase.gpg" + ) diff --git a/packaging_automation/update_docker.py b/packaging_automation/update_docker.py index d1edbeff..838a71ca 100644 --- a/packaging_automation/update_docker.py +++ b/packaging_automation/update_docker.py @@ -1,247 +1,247 @@ -import argparse -import os -import uuid -from datetime import datetime -from enum import Enum -from dotenv import dotenv_values -from typing import Tuple - -import pathlib2 - -from .common_tool_methods import ( - process_template_file_with_minor, - write_to_file, - run, - initialize_env, - create_pr, - remove_cloned_code, - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, - get_minor_project_version_for_docker, -) - -REPO_OWNER = "citusdata" -PROJECT_NAME = "docker" -MAIN_BRANCH = "master" - - -class SupportedDockerImages(Enum): - latest = 1 - docker_compose = 2 - alpine = 3 - postgres14 = 4 - postgres15 = 5 - - -docker_templates = { - SupportedDockerImages.latest: "latest/latest.tmpl.dockerfile", - SupportedDockerImages.docker_compose: "latest/docker-compose.tmpl.yml", - SupportedDockerImages.alpine: "alpine/alpine.tmpl.dockerfile", - SupportedDockerImages.postgres14: "postgres-14/postgres-14.tmpl.dockerfile", - SupportedDockerImages.postgres15: "postgres-15/postgres-15.tmpl.dockerfile", -} - -docker_outputs = { - SupportedDockerImages.latest: "Dockerfile", - SupportedDockerImages.docker_compose: "docker-compose.yml", - SupportedDockerImages.alpine: "alpine/Dockerfile", - SupportedDockerImages.postgres14: "postgres-14/Dockerfile", - SupportedDockerImages.postgres15: "postgres-15/Dockerfile", -} - -BASE_PATH = pathlib2.Path(__file__).parent.absolute() - - -def update_docker_file_for_latest_postgres( - project_version: str, template_path: str, exec_path: str, postgres_version: str -): - minor_version = get_minor_project_version_for_docker(project_version) - debian_project_version = project_version.replace("_", "-") - content = process_template_file_with_minor( - debian_project_version, - template_path, - docker_templates[SupportedDockerImages.latest], - minor_version, - postgres_version, - ) - dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.latest]}" - write_to_file(content, dest_file_name) - - -def update_regular_docker_compose_file( - project_version: str, template_path: str, exec_path: str -): - minor_version = get_minor_project_version_for_docker(project_version) - content = process_template_file_with_minor( - project_version, - template_path, - docker_templates[SupportedDockerImages.docker_compose], - minor_version, - ) - dest_file_name = ( - f"{exec_path}/{docker_outputs[SupportedDockerImages.docker_compose]}" - ) - write_to_file(content, dest_file_name) - - -def update_docker_file_alpine( - project_version: str, template_path: str, exec_path: str, postgres_version: str -): - minor_version = get_minor_project_version_for_docker(project_version) - content = process_template_file_with_minor( - project_version, - template_path, - docker_templates[SupportedDockerImages.alpine], - 
minor_version, - postgres_version, - ) - dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.alpine]}" - write_to_file(content, dest_file_name) - - -def update_docker_file_for_postgres15( - project_version: str, template_path: str, exec_path: str, postgres_version: str -): - minor_version = get_minor_project_version_for_docker(project_version) - debian_project_version = project_version.replace("_", "-") - content = process_template_file_with_minor( - debian_project_version, - template_path, - docker_templates[SupportedDockerImages.postgres15], - minor_version, - postgres_version, - ) - dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.postgres15]}" - create_directory_if_not_exists(dest_file_name) - write_to_file(content, dest_file_name) - - -def update_docker_file_for_postgres14( - project_version: str, template_path: str, exec_path: str, postgres_version: str -): - minor_version = get_minor_project_version_for_docker(project_version) - debian_project_version = project_version.replace("_", "-") - content = process_template_file_with_minor( - debian_project_version, - template_path, - docker_templates[SupportedDockerImages.postgres14], - minor_version, - postgres_version, - ) - dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.postgres14]}" - create_directory_if_not_exists(dest_file_name) - write_to_file(content, dest_file_name) - - -def create_directory_if_not_exists(dest_file_name): - dir_name = os.path.dirname(dest_file_name) - if not os.path.exists(dir_name): - os.makedirs(dir_name) - - -def get_new_changelog_entry(project_version: str): - header = f"### citus-docker v{project_version}.docker ({datetime.strftime(datetime.now(), '%B %d,%Y')}) ###\n" - citus_bump_str = f"\n* Bump Citus version to {project_version}\n" - - changelog_entry = f"{header}{citus_bump_str}" - changelog_entry = f"{changelog_entry}\n" - return changelog_entry - - -def update_changelog(project_version: str, exec_path: str): - latest_changelog = get_new_changelog_entry(project_version) - changelog_file_path = f"{exec_path}/CHANGELOG.md" - with open( - changelog_file_path, - "r+", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - if f"({project_version}" not in reader.readline(): - reader.seek(0, 0) - old_changelog = reader.read() - changelog = f"{latest_changelog}{old_changelog}" - reader.seek(0, 0) - reader.write(changelog) - else: - raise ValueError( - f"Already using version {project_version} in the changelog" - ) - - -def update_all_docker_files(project_version: str, exec_path: str): - template_path = f"{BASE_PATH}/templates/docker" - pkgvars_file = f"{exec_path}/pkgvars" - - ( - postgres_16_version, - postgres_15_version, - postgres_14_version, - ) = read_postgres_versions(pkgvars_file) - - latest_postgres_version = postgres_16_version - - update_docker_file_for_latest_postgres( - project_version, template_path, exec_path, latest_postgres_version - ) - update_regular_docker_compose_file(project_version, template_path, exec_path) - update_docker_file_alpine( - project_version, template_path, exec_path, latest_postgres_version - ) - update_docker_file_for_postgres14( - project_version, template_path, exec_path, postgres_14_version - ) - update_docker_file_for_postgres15( - project_version, template_path, exec_path, postgres_15_version - ) - update_changelog(project_version, exec_path) - - -def read_postgres_versions(pkgvars_file: str) -> Tuple[str, str, str]: - config = dotenv_values(pkgvars_file) - return 
( - config["postgres_16_version"], - config["postgres_15_version"], - config["postgres_14_version"], - ) - - -CHECKOUT_DIR = "docker_temp" - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--prj_ver", required=True) - parser.add_argument("--gh_token", required=True) - parser.add_argument("--pipeline", action="store_true") - parser.add_argument("--exec_path") - parser.add_argument("--is_test", action="store_true") - args = parser.parse_args() - - if args.pipeline: - if not args.exec_path: - raise ValueError("exec_path should be defined") - execution_path = args.exec_path - else: - execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" - initialize_env(execution_path, PROJECT_NAME, execution_path) - - os.chdir(execution_path) - pr_branch = f"release-{args.prj_ver}-{uuid.uuid4()}" - run(f"git checkout -b {pr_branch}") - - update_all_docker_files(args.prj_ver, execution_path) - run("git add --update .") - - commit_message = f"Bump docker to version {args.prj_ver}" - run(f'git commit -m "{commit_message}"') - if not args.is_test: - run(f"git push --set-upstream origin {pr_branch}") - create_pr( - args.gh_token, - pr_branch, - commit_message, - REPO_OWNER, - PROJECT_NAME, - MAIN_BRANCH, - ) - remove_cloned_code(execution_path) +import argparse +import os +import uuid +from datetime import datetime +from enum import Enum +from dotenv import dotenv_values +from typing import Tuple + +import pathlib2 + +from .common_tool_methods import ( + process_template_file_with_minor, + write_to_file, + run, + initialize_env, + create_pr, + remove_cloned_code, + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, + get_minor_project_version_for_docker, +) + +REPO_OWNER = "citusdata" +PROJECT_NAME = "docker" +MAIN_BRANCH = "master" + + +class SupportedDockerImages(Enum): + latest = 1 + docker_compose = 2 + alpine = 3 + postgres14 = 4 + postgres15 = 5 + + +docker_templates = { + SupportedDockerImages.latest: "latest/latest.tmpl.dockerfile", + SupportedDockerImages.docker_compose: "latest/docker-compose.tmpl.yml", + SupportedDockerImages.alpine: "alpine/alpine.tmpl.dockerfile", + SupportedDockerImages.postgres14: "postgres-14/postgres-14.tmpl.dockerfile", + SupportedDockerImages.postgres15: "postgres-15/postgres-15.tmpl.dockerfile", +} + +docker_outputs = { + SupportedDockerImages.latest: "Dockerfile", + SupportedDockerImages.docker_compose: "docker-compose.yml", + SupportedDockerImages.alpine: "alpine/Dockerfile", + SupportedDockerImages.postgres14: "postgres-14/Dockerfile", + SupportedDockerImages.postgres15: "postgres-15/Dockerfile", +} + +BASE_PATH = pathlib2.Path(__file__).parent.absolute() + + +def update_docker_file_for_latest_postgres( + project_version: str, template_path: str, exec_path: str, postgres_version: str +): + minor_version = get_minor_project_version_for_docker(project_version) + debian_project_version = project_version.replace("_", "-") + content = process_template_file_with_minor( + debian_project_version, + template_path, + docker_templates[SupportedDockerImages.latest], + minor_version, + postgres_version, + ) + dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.latest]}" + write_to_file(content, dest_file_name) + + +def update_regular_docker_compose_file( + project_version: str, template_path: str, exec_path: str +): + minor_version = get_minor_project_version_for_docker(project_version) + content = process_template_file_with_minor( + project_version, + template_path, + 
docker_templates[SupportedDockerImages.docker_compose], + minor_version, + ) + dest_file_name = ( + f"{exec_path}/{docker_outputs[SupportedDockerImages.docker_compose]}" + ) + write_to_file(content, dest_file_name) + + +def update_docker_file_alpine( + project_version: str, template_path: str, exec_path: str, postgres_version: str +): + minor_version = get_minor_project_version_for_docker(project_version) + content = process_template_file_with_minor( + project_version, + template_path, + docker_templates[SupportedDockerImages.alpine], + minor_version, + postgres_version, + ) + dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.alpine]}" + write_to_file(content, dest_file_name) + + +def update_docker_file_for_postgres15( + project_version: str, template_path: str, exec_path: str, postgres_version: str +): + minor_version = get_minor_project_version_for_docker(project_version) + debian_project_version = project_version.replace("_", "-") + content = process_template_file_with_minor( + debian_project_version, + template_path, + docker_templates[SupportedDockerImages.postgres15], + minor_version, + postgres_version, + ) + dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.postgres15]}" + create_directory_if_not_exists(dest_file_name) + write_to_file(content, dest_file_name) + + +def update_docker_file_for_postgres14( + project_version: str, template_path: str, exec_path: str, postgres_version: str +): + minor_version = get_minor_project_version_for_docker(project_version) + debian_project_version = project_version.replace("_", "-") + content = process_template_file_with_minor( + debian_project_version, + template_path, + docker_templates[SupportedDockerImages.postgres14], + minor_version, + postgres_version, + ) + dest_file_name = f"{exec_path}/{docker_outputs[SupportedDockerImages.postgres14]}" + create_directory_if_not_exists(dest_file_name) + write_to_file(content, dest_file_name) + + +def create_directory_if_not_exists(dest_file_name): + dir_name = os.path.dirname(dest_file_name) + if not os.path.exists(dir_name): + os.makedirs(dir_name) + + +def get_new_changelog_entry(project_version: str): + header = f"### citus-docker v{project_version}.docker ({datetime.strftime(datetime.now(), '%B %d,%Y')}) ###\n" + citus_bump_str = f"\n* Bump Citus version to {project_version}\n" + + changelog_entry = f"{header}{citus_bump_str}" + changelog_entry = f"{changelog_entry}\n" + return changelog_entry + + +def update_changelog(project_version: str, exec_path: str): + latest_changelog = get_new_changelog_entry(project_version) + changelog_file_path = f"{exec_path}/CHANGELOG.md" + with open( + changelog_file_path, + "r+", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + if f"({project_version}" not in reader.readline(): + reader.seek(0, 0) + old_changelog = reader.read() + changelog = f"{latest_changelog}{old_changelog}" + reader.seek(0, 0) + reader.write(changelog) + else: + raise ValueError( + f"Already using version {project_version} in the changelog" + ) + + +def update_all_docker_files(project_version: str, exec_path: str): + template_path = f"{BASE_PATH}/templates/docker" + pkgvars_file = f"{exec_path}/pkgvars" + + ( + postgres_16_version, + postgres_15_version, + postgres_14_version, + ) = read_postgres_versions(pkgvars_file) + + latest_postgres_version = postgres_16_version + + update_docker_file_for_latest_postgres( + project_version, template_path, exec_path, latest_postgres_version + ) + 
update_regular_docker_compose_file(project_version, template_path, exec_path) + update_docker_file_alpine( + project_version, template_path, exec_path, latest_postgres_version + ) + update_docker_file_for_postgres14( + project_version, template_path, exec_path, postgres_14_version + ) + update_docker_file_for_postgres15( + project_version, template_path, exec_path, postgres_15_version + ) + update_changelog(project_version, exec_path) + + +def read_postgres_versions(pkgvars_file: str) -> Tuple[str, str, str]: + config = dotenv_values(pkgvars_file) + return ( + config["postgres_16_version"], + config["postgres_15_version"], + config["postgres_14_version"], + ) + + +CHECKOUT_DIR = "docker_temp" + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--prj_ver", required=True) + parser.add_argument("--gh_token", required=True) + parser.add_argument("--pipeline", action="store_true") + parser.add_argument("--exec_path") + parser.add_argument("--is_test", action="store_true") + args = parser.parse_args() + + if args.pipeline: + if not args.exec_path: + raise ValueError("exec_path should be defined") + execution_path = args.exec_path + else: + execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" + initialize_env(execution_path, PROJECT_NAME, execution_path) + + os.chdir(execution_path) + pr_branch = f"release-{args.prj_ver}-{uuid.uuid4()}" + run(f"git checkout -b {pr_branch}") + + update_all_docker_files(args.prj_ver, execution_path) + run("git add --update .") + + commit_message = f"Bump docker to version {args.prj_ver}" + run(f'git commit -m "{commit_message}"') + if not args.is_test: + run(f"git push --set-upstream origin {pr_branch}") + create_pr( + args.gh_token, + pr_branch, + commit_message, + REPO_OWNER, + PROJECT_NAME, + MAIN_BRANCH, + ) + remove_cloned_code(execution_path) diff --git a/packaging_automation/update_package_properties.py b/packaging_automation/update_package_properties.py index 73e8569e..edfda8a4 100644 --- a/packaging_automation/update_package_properties.py +++ b/packaging_automation/update_package_properties.py @@ -1,452 +1,452 @@ -import argparse -import os -import re -import uuid -from dataclasses import dataclass -from datetime import datetime -from enum import Enum - -import pathlib2 -import string_utils -from parameters_validation import validate_parameters, parameter_validation - -from .common_tool_methods import ( - DEFAULT_ENCODING_FOR_FILE_HANDLING, - DEFAULT_UNICODE_ERROR_HANDLER, - create_pr, - find_nth_occurrence_position, - get_project_version_from_tag_name, - get_template_environment, - initialize_env, - remove_cloned_code, - run, -) -from .common_validations import is_version, is_tag - -BASE_PATH = pathlib2.Path(__file__).parent.absolute() -REPO_OWNER = "citusdata" -PROJECT_NAME = "packaging" - - -@dataclass() -class ProjectDetails: - name: str - version_suffix: str - github_repo_name: str - changelog_project_name: str - packaging_branch: str - - -class SupportedProject(Enum): - citus = ProjectDetails( - name="citus", - version_suffix="citus", - github_repo_name="citus", - changelog_project_name="citus", - packaging_branch="all-citus", - ) - citus_enterprise = ProjectDetails( - name="citus-enterprise", - version_suffix="citus", - github_repo_name="citus-enterprise", - changelog_project_name="citus-enterprise", - packaging_branch="all-enterprise", - ) - pg_auto_failover = ProjectDetails( - name="pg-auto-failover", - version_suffix="", - github_repo_name="pg_auto_failover", - changelog_project_name="pg_auto_failover", - 
packaging_branch="all-pgautofailover", - ) - pg_auto_failover_enterprise = ProjectDetails( - name="pg-auto-failover-enterprise", - version_suffix="", - github_repo_name="citus-ha", - changelog_project_name="pg_auto_failover-enterprise", - packaging_branch="all-pgautofailover-enterprise", - ) - - -@parameter_validation -def is_project_changelog_header(header: str): - if not header: - raise ValueError("header should be non-empty and should not be None") - # an example matching string is "### citus-enterprise v10.1.0 (July 14, 2021) ###" - if not re.match( - r"^### \w+[-]?\w+\sv\d+\.\d+\.\d+\s\(\w+\s\d+,\s\d+\)\s###$", header - ): - raise ValueError( - f"changelog header is in invalid format. Actual:{header} Expected: ### citus v8.3.3 (March 23, 2021) ### " - ) - - -@dataclass -class PackagePropertiesParams: - project: SupportedProject - project_version: str - fancy: bool - fancy_version_number: int - microsoft_email: str = "" - name_surname: str = "" - changelog_date: datetime = datetime.now() - changelog_entry: str = "" - - @property - def changelog_version_entry(self) -> str: - return f"{self.project_version}-{self.fancy_version_number}" - - @property - def spec_file_name(self) -> str: - return spec_file_name(self.project.value.name) - - @property - def pkgvars_template_file_name(self) -> str: - return f"{self.project.value.name}-pkgvars.tmpl" - - @property - def rpm_spec_template_file_name(self) -> str: - return f"{self.project.value.name}.spec.tmpl" - - @property - def version_number(self) -> str: - fancy_suffix = f"-{self.fancy_version_number}" if self.fancy else "" - return f"{self.project_version}{fancy_suffix}" - - @property - def version_number_with_project_name(self) -> str: - fancy_suffix = f"{self.fancy_version_number}" if self.fancy else "1" - return f"{self.project_version}{self.project_name_suffix}-{fancy_suffix}" - - @property - def rpm_version(self) -> str: - return f"{self.project_version}{self.project_name_suffix}" - - # debian changelog does not allow '_' character to be used in changelog. Therefore, we replace '_' with '-' - # to be able to release package without error - @property - def debian_changelog_project_version(self): - return f"{self.project_version.replace('_', '-')}" - - @property - def debian_changelog_version_header(self): - fancy_suffix = f"{self.fancy_version_number}" if self.fancy else "1" - return f"{self.debian_changelog_project_version}{self.project_name_suffix}-{fancy_suffix}" - - @property - def project_name_suffix(self) -> str: - return ( - self.project.value.version_suffix - if not self.project.value.version_suffix - else f".{self.project.value.version_suffix}" - ) - - @property - def changelog_project_name(self) -> str: - return self.project.value.name.replace("-", " ").replace("_", " ").title() - - @property - def rpm_header(self): - formatted_date = self.changelog_date.strftime("%a %b %d %Y") - return ( - f"* {formatted_date} - {self.name_surname} <{self.microsoft_email}> " - f"{self.version_number_with_project_name} " - ) - - @property - def debian_trailer(self): - formatted_date = self.changelog_date.strftime("%a, %d %b %Y %H:%M:%S %z") - return f" -- {self.name_surname} <{self.microsoft_email}> {formatted_date}\n" - - -def get_enum_from_changelog_project_name(project_name) -> SupportedProject: - for e in SupportedProject: - if e.value.changelog_project_name == project_name: - return e - raise ValueError( - f"{project_name} could not be found in supported project changelog names." 
- ) - - -def spec_file_name(project_name: str) -> str: - return f"{project_name}.spec" - - -def get_last_changelog_content(all_changelog_content: str) -> str: - second_changelog_index = find_nth_occurrence_position( - all_changelog_content, "###", 3 - ) - changelogs = all_changelog_content[:second_changelog_index] - lines = changelogs.splitlines() - if len(lines) < 1: - raise ValueError("At least one line should be in changelog") - changelog_header = lines[0] - if not changelog_header.startswith("###"): - raise ValueError("Changelog header should start with '###'") - return changelogs - - -def remove_paranthesis_from_string(param: str) -> str: - return re.sub(r"[(\[].*?[)\]]", "", param) - - -# truncates # chars , get the version an put paranthesis around version number adds 'stable; urgency=low' at the end -# changelog_header=> ### citus v8.3.3 (March 23, 2021) ### -# debian header => citus (10.0.3.citus-1) stable; urgency=low -@validate_parameters -def debian_changelog_header( - supported_project: SupportedProject, - project_version: str, - fancy: bool, - fancy_version_number: int, -) -> str: - package_properties_params = PackagePropertiesParams( - project=supported_project, - project_version=project_version, - fancy=fancy, - fancy_version_number=fancy_version_number, - ) - - version_on_changelog = package_properties_params.debian_changelog_version_header - - return ( - f"{supported_project.value.name} ({version_on_changelog}) stable; urgency=low" - ) - - -def get_debian_latest_changelog( - package_properties_params: PackagePropertiesParams, -) -> str: - lines = [] - lines.append( - debian_changelog_header( - package_properties_params.project, - package_properties_params.project_version, - package_properties_params.fancy, - package_properties_params.fancy_version_number, - ) - ) - lines.append(f" * {get_changelog_entry(package_properties_params)}") - lines.append(package_properties_params.debian_trailer) - debian_latest_changelog = "" - for i, line in enumerate(lines): - append_line = line if i in (0, len(lines) - 1) else "" + line - debian_latest_changelog = debian_latest_changelog + append_line + "\n\n" - return debian_latest_changelog[:-1] - - -def prepend_latest_changelog_into_debian_changelog( - package_properties_params: PackagePropertiesParams, changelog_file_path: str -) -> None: - debian_latest_changelog = get_debian_latest_changelog(package_properties_params) - with open( - changelog_file_path, - mode="r+", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - if not (package_properties_params.changelog_version_entry in reader.readline()): - reader.seek(0, 0) - old_changelog = reader.read() - changelog = f"{debian_latest_changelog}{old_changelog}" - reader.seek(0, 0) - reader.write(changelog) - else: - raise ValueError("Already version in the debian changelog") - - -@validate_parameters -def update_pkgvars( - package_properties_params: PackagePropertiesParams, - templates_path: str, - pkgvars_path: str, -) -> None: - env = get_template_environment(templates_path) - - version_str = package_properties_params.version_number_with_project_name - - template = env.get_template(package_properties_params.pkgvars_template_file_name) - - pkgvars_content = f"{template.render(version=version_str)}\n" - with open( - f"{pkgvars_path}/pkgvars", - "w", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write(pkgvars_content) - - -def rpm_changelog_history(spec_file_path: str) -> str: - 
with open( - spec_file_path, - "r", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as reader: - spec_content = reader.read() - changelog_index = spec_content.find("%changelog") - changelog_content = spec_content[changelog_index + len("%changelog") + 1 :] - - return changelog_content - - -def get_changelog_entry(package_properties_params: PackagePropertiesParams): - default_changelog_entry = ( - f"Official {package_properties_params.debian_changelog_project_version} release of " - f"{package_properties_params.changelog_project_name}" - ) - return ( - package_properties_params.changelog_entry - if package_properties_params.changelog_entry - else default_changelog_entry - ) - - -def get_rpm_changelog(package_properties_params: PackagePropertiesParams) -> str: - changelog = ( - package_properties_params.changelog_entry - if package_properties_params.changelog_entry - else get_changelog_entry(package_properties_params) - ) - header = package_properties_params.rpm_header - rpm_changelog = f"{header.strip()}\n- {changelog}" - - return rpm_changelog - - -def update_rpm_spec( - package_properties_params: PackagePropertiesParams, - spec_full_path: str, - templates_path: str, -) -> None: - env = get_template_environment(templates_path) - - rpm_version = package_properties_params.rpm_version - template = env.get_template(package_properties_params.rpm_spec_template_file_name) - - history_lines = rpm_changelog_history(spec_full_path).splitlines() - - if ( - len(history_lines) > 0 - and package_properties_params.version_number_with_project_name - in history_lines[0] - ): - raise ValueError( - f"{package_properties_params.project_version} already exists in rpm spec file" - ) - - latest_changelog = get_rpm_changelog(package_properties_params) - changelog = f"{latest_changelog}\n\n{rpm_changelog_history(spec_full_path)}" - - content = template.render( - version=package_properties_params.project_version, - rpm_version=rpm_version, - project_name=package_properties_params.project.value.name, - fancy_version_no=package_properties_params.fancy_version_number, - changelog=changelog, - ) - with open( - spec_full_path, - "w+", - encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, - errors=DEFAULT_UNICODE_ERROR_HANDLER, - ) as writer: - writer.write(content) - - -def validate_package_properties_params_for_update_all_changes( - package_props: PackagePropertiesParams, -): - if package_props.fancy_version_number < 0: - raise ValueError("Fancy version number should not be negative") - if not string_utils.is_email(package_props.microsoft_email): - raise ValueError("Microsoft email should be in email format") - if not package_props.name_surname: - raise ValueError("Name Surname should not be empty") - is_version(package_props.project_version) - - -@validate_parameters -def update_all_changes( - package_properties_params: PackagePropertiesParams, packaging_path: str -): - validate_package_properties_params_for_update_all_changes(package_properties_params) - templates_path = f"{BASE_PATH}/templates" - update_pkgvars(package_properties_params, templates_path, f"{packaging_path}") - prepend_latest_changelog_into_debian_changelog( - package_properties_params, f"{packaging_path}/debian/changelog" - ) - spec_full_path = f"{packaging_path}/{package_properties_params.spec_file_name}" - update_rpm_spec(package_properties_params, spec_full_path, templates_path) - - -CHECKOUT_DIR = "update_properties_temp" -if __name__ == "__main__": - parser = argparse.ArgumentParser() - 
parser.add_argument("--gh_token", required=True) - parser.add_argument("--prj_name", choices=[r.name for r in SupportedProject]) - parser.add_argument("--tag_name", required=True) - parser.add_argument("--fancy_ver_no", type=int, choices=range(1, 10), default=1) - parser.add_argument("--email", required=True) - parser.add_argument("--name", required=True) - parser.add_argument("--date") - parser.add_argument("--pipeline", action="store_true") - parser.add_argument("--exec_path") - parser.add_argument("--is_test", action="store_true") - parser.add_argument("--changelog_entry") - arguments = parser.parse_args() - - prj_ver = get_project_version_from_tag_name(arguments.tag_name) - - project = SupportedProject[arguments.prj_name] - if arguments.pipeline: - if not arguments.exec_path: - raise ValueError("exec_path should be defined") - execution_path = arguments.exec_path - os.chdir(execution_path) - - else: - execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" - initialize_env(execution_path, PROJECT_NAME, execution_path) - os.chdir(execution_path) - run(f"git checkout {project.value.packaging_branch}") - - pr_branch = f"{project.value.packaging_branch}-{prj_ver}-{uuid.uuid4()}" - run(f"git checkout -b {pr_branch}") - exec_date = ( - datetime.strptime(arguments.date, "%Y.%m.%d %H:%M") - if arguments.date - else datetime.now().astimezone() - ) - is_tag(arguments.tag_name) - - fancy = arguments.fancy_ver_no > 1 - - package_properties = PackagePropertiesParams( - project=project, - project_version=prj_ver, - fancy=fancy, - fancy_version_number=arguments.fancy_ver_no, - name_surname=arguments.name, - microsoft_email=arguments.email, - changelog_date=exec_date, - changelog_entry=arguments.changelog_entry, - ) - update_all_changes(package_properties, execution_path) - - commit_message = f"Bump to {arguments.prj_name} {prj_ver}" - run(f'git commit -am "{commit_message}"') - - if not arguments.is_test: - run(f"git push --set-upstream origin {pr_branch}") - create_pr( - arguments.gh_token, - pr_branch, - commit_message, - REPO_OWNER, - PROJECT_NAME, - project.value.packaging_branch, - ) - if not arguments.is_test and not arguments.pipeline: - remove_cloned_code(execution_path) +import argparse +import os +import re +import uuid +from dataclasses import dataclass +from datetime import datetime +from enum import Enum + +import pathlib2 +import string_utils +from parameters_validation import validate_parameters, parameter_validation + +from .common_tool_methods import ( + DEFAULT_ENCODING_FOR_FILE_HANDLING, + DEFAULT_UNICODE_ERROR_HANDLER, + create_pr, + find_nth_occurrence_position, + get_project_version_from_tag_name, + get_template_environment, + initialize_env, + remove_cloned_code, + run, +) +from .common_validations import is_version, is_tag + +BASE_PATH = pathlib2.Path(__file__).parent.absolute() +REPO_OWNER = "citusdata" +PROJECT_NAME = "packaging" + + +@dataclass() +class ProjectDetails: + name: str + version_suffix: str + github_repo_name: str + changelog_project_name: str + packaging_branch: str + + +class SupportedProject(Enum): + citus = ProjectDetails( + name="citus", + version_suffix="citus", + github_repo_name="citus", + changelog_project_name="citus", + packaging_branch="all-citus", + ) + citus_enterprise = ProjectDetails( + name="citus-enterprise", + version_suffix="citus", + github_repo_name="citus-enterprise", + changelog_project_name="citus-enterprise", + packaging_branch="all-enterprise", + ) + pg_auto_failover = ProjectDetails( + name="pg-auto-failover", + version_suffix="", + 
github_repo_name="pg_auto_failover", + changelog_project_name="pg_auto_failover", + packaging_branch="all-pgautofailover", + ) + pg_auto_failover_enterprise = ProjectDetails( + name="pg-auto-failover-enterprise", + version_suffix="", + github_repo_name="citus-ha", + changelog_project_name="pg_auto_failover-enterprise", + packaging_branch="all-pgautofailover-enterprise", + ) + + +@parameter_validation +def is_project_changelog_header(header: str): + if not header: + raise ValueError("header should be non-empty and should not be None") + # an example matching string is "### citus-enterprise v10.1.0 (July 14, 2021) ###" + if not re.match( + r"^### \w+[-]?\w+\sv\d+\.\d+\.\d+\s\(\w+\s\d+,\s\d+\)\s###$", header + ): + raise ValueError( + f"changelog header is in invalid format. Actual:{header} Expected: ### citus v8.3.3 (March 23, 2021) ### " + ) + + +@dataclass +class PackagePropertiesParams: + project: SupportedProject + project_version: str + fancy: bool + fancy_version_number: int + microsoft_email: str = "" + name_surname: str = "" + changelog_date: datetime = datetime.now() + changelog_entry: str = "" + + @property + def changelog_version_entry(self) -> str: + return f"{self.project_version}-{self.fancy_version_number}" + + @property + def spec_file_name(self) -> str: + return spec_file_name(self.project.value.name) + + @property + def pkgvars_template_file_name(self) -> str: + return f"{self.project.value.name}-pkgvars.tmpl" + + @property + def rpm_spec_template_file_name(self) -> str: + return f"{self.project.value.name}.spec.tmpl" + + @property + def version_number(self) -> str: + fancy_suffix = f"-{self.fancy_version_number}" if self.fancy else "" + return f"{self.project_version}{fancy_suffix}" + + @property + def version_number_with_project_name(self) -> str: + fancy_suffix = f"{self.fancy_version_number}" if self.fancy else "1" + return f"{self.project_version}{self.project_name_suffix}-{fancy_suffix}" + + @property + def rpm_version(self) -> str: + return f"{self.project_version}{self.project_name_suffix}" + + # debian changelog does not allow '_' character to be used in changelog. 
Therefore, we replace '_' with '-' + # to be able to release package without error + @property + def debian_changelog_project_version(self): + return f"{self.project_version.replace('_', '-')}" + + @property + def debian_changelog_version_header(self): + fancy_suffix = f"{self.fancy_version_number}" if self.fancy else "1" + return f"{self.debian_changelog_project_version}{self.project_name_suffix}-{fancy_suffix}" + + @property + def project_name_suffix(self) -> str: + return ( + self.project.value.version_suffix + if not self.project.value.version_suffix + else f".{self.project.value.version_suffix}" + ) + + @property + def changelog_project_name(self) -> str: + return self.project.value.name.replace("-", " ").replace("_", " ").title() + + @property + def rpm_header(self): + formatted_date = self.changelog_date.strftime("%a %b %d %Y") + return ( + f"* {formatted_date} - {self.name_surname} <{self.microsoft_email}> " + f"{self.version_number_with_project_name} " + ) + + @property + def debian_trailer(self): + formatted_date = self.changelog_date.strftime("%a, %d %b %Y %H:%M:%S %z") + return f" -- {self.name_surname} <{self.microsoft_email}> {formatted_date}\n" + + +def get_enum_from_changelog_project_name(project_name) -> SupportedProject: + for e in SupportedProject: + if e.value.changelog_project_name == project_name: + return e + raise ValueError( + f"{project_name} could not be found in supported project changelog names." + ) + + +def spec_file_name(project_name: str) -> str: + return f"{project_name}.spec" + + +def get_last_changelog_content(all_changelog_content: str) -> str: + second_changelog_index = find_nth_occurrence_position( + all_changelog_content, "###", 3 + ) + changelogs = all_changelog_content[:second_changelog_index] + lines = changelogs.splitlines() + if len(lines) < 1: + raise ValueError("At least one line should be in changelog") + changelog_header = lines[0] + if not changelog_header.startswith("###"): + raise ValueError("Changelog header should start with '###'") + return changelogs + + +def remove_paranthesis_from_string(param: str) -> str: + return re.sub(r"[(\[].*?[)\]]", "", param) + + +# truncates # chars , get the version an put paranthesis around version number adds 'stable; urgency=low' at the end +# changelog_header=> ### citus v8.3.3 (March 23, 2021) ### +# debian header => citus (10.0.3.citus-1) stable; urgency=low +@validate_parameters +def debian_changelog_header( + supported_project: SupportedProject, + project_version: str, + fancy: bool, + fancy_version_number: int, +) -> str: + package_properties_params = PackagePropertiesParams( + project=supported_project, + project_version=project_version, + fancy=fancy, + fancy_version_number=fancy_version_number, + ) + + version_on_changelog = package_properties_params.debian_changelog_version_header + + return ( + f"{supported_project.value.name} ({version_on_changelog}) stable; urgency=low" + ) + + +def get_debian_latest_changelog( + package_properties_params: PackagePropertiesParams, +) -> str: + lines = [] + lines.append( + debian_changelog_header( + package_properties_params.project, + package_properties_params.project_version, + package_properties_params.fancy, + package_properties_params.fancy_version_number, + ) + ) + lines.append(f" * {get_changelog_entry(package_properties_params)}") + lines.append(package_properties_params.debian_trailer) + debian_latest_changelog = "" + for i, line in enumerate(lines): + append_line = line if i in (0, len(lines) - 1) else "" + line + debian_latest_changelog = 
debian_latest_changelog + append_line + "\n\n" + return debian_latest_changelog[:-1] + + +def prepend_latest_changelog_into_debian_changelog( + package_properties_params: PackagePropertiesParams, changelog_file_path: str +) -> None: + debian_latest_changelog = get_debian_latest_changelog(package_properties_params) + with open( + changelog_file_path, + mode="r+", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + if not (package_properties_params.changelog_version_entry in reader.readline()): + reader.seek(0, 0) + old_changelog = reader.read() + changelog = f"{debian_latest_changelog}{old_changelog}" + reader.seek(0, 0) + reader.write(changelog) + else: + raise ValueError("Already version in the debian changelog") + + +@validate_parameters +def update_pkgvars( + package_properties_params: PackagePropertiesParams, + templates_path: str, + pkgvars_path: str, +) -> None: + env = get_template_environment(templates_path) + + version_str = package_properties_params.version_number_with_project_name + + template = env.get_template(package_properties_params.pkgvars_template_file_name) + + pkgvars_content = f"{template.render(version=version_str)}\n" + with open( + f"{pkgvars_path}/pkgvars", + "w", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write(pkgvars_content) + + +def rpm_changelog_history(spec_file_path: str) -> str: + with open( + spec_file_path, + "r", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as reader: + spec_content = reader.read() + changelog_index = spec_content.find("%changelog") + changelog_content = spec_content[changelog_index + len("%changelog") + 1 :] + + return changelog_content + + +def get_changelog_entry(package_properties_params: PackagePropertiesParams): + default_changelog_entry = ( + f"Official {package_properties_params.debian_changelog_project_version} release of " + f"{package_properties_params.changelog_project_name}" + ) + return ( + package_properties_params.changelog_entry + if package_properties_params.changelog_entry + else default_changelog_entry + ) + + +def get_rpm_changelog(package_properties_params: PackagePropertiesParams) -> str: + changelog = ( + package_properties_params.changelog_entry + if package_properties_params.changelog_entry + else get_changelog_entry(package_properties_params) + ) + header = package_properties_params.rpm_header + rpm_changelog = f"{header.strip()}\n- {changelog}" + + return rpm_changelog + + +def update_rpm_spec( + package_properties_params: PackagePropertiesParams, + spec_full_path: str, + templates_path: str, +) -> None: + env = get_template_environment(templates_path) + + rpm_version = package_properties_params.rpm_version + template = env.get_template(package_properties_params.rpm_spec_template_file_name) + + history_lines = rpm_changelog_history(spec_full_path).splitlines() + + if ( + len(history_lines) > 0 + and package_properties_params.version_number_with_project_name + in history_lines[0] + ): + raise ValueError( + f"{package_properties_params.project_version} already exists in rpm spec file" + ) + + latest_changelog = get_rpm_changelog(package_properties_params) + changelog = f"{latest_changelog}\n\n{rpm_changelog_history(spec_full_path)}" + + content = template.render( + version=package_properties_params.project_version, + rpm_version=rpm_version, + project_name=package_properties_params.project.value.name, + 
fancy_version_no=package_properties_params.fancy_version_number, + changelog=changelog, + ) + with open( + spec_full_path, + "w+", + encoding=DEFAULT_ENCODING_FOR_FILE_HANDLING, + errors=DEFAULT_UNICODE_ERROR_HANDLER, + ) as writer: + writer.write(content) + + +def validate_package_properties_params_for_update_all_changes( + package_props: PackagePropertiesParams, +): + if package_props.fancy_version_number < 0: + raise ValueError("Fancy version number should not be negative") + if not string_utils.is_email(package_props.microsoft_email): + raise ValueError("Microsoft email should be in email format") + if not package_props.name_surname: + raise ValueError("Name Surname should not be empty") + is_version(package_props.project_version) + + +@validate_parameters +def update_all_changes( + package_properties_params: PackagePropertiesParams, packaging_path: str +): + validate_package_properties_params_for_update_all_changes(package_properties_params) + templates_path = f"{BASE_PATH}/templates" + update_pkgvars(package_properties_params, templates_path, f"{packaging_path}") + prepend_latest_changelog_into_debian_changelog( + package_properties_params, f"{packaging_path}/debian/changelog" + ) + spec_full_path = f"{packaging_path}/{package_properties_params.spec_file_name}" + update_rpm_spec(package_properties_params, spec_full_path, templates_path) + + +CHECKOUT_DIR = "update_properties_temp" +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--gh_token", required=True) + parser.add_argument("--prj_name", choices=[r.name for r in SupportedProject]) + parser.add_argument("--tag_name", required=True) + parser.add_argument("--fancy_ver_no", type=int, choices=range(1, 10), default=1) + parser.add_argument("--email", required=True) + parser.add_argument("--name", required=True) + parser.add_argument("--date") + parser.add_argument("--pipeline", action="store_true") + parser.add_argument("--exec_path") + parser.add_argument("--is_test", action="store_true") + parser.add_argument("--changelog_entry") + arguments = parser.parse_args() + + prj_ver = get_project_version_from_tag_name(arguments.tag_name) + + project = SupportedProject[arguments.prj_name] + if arguments.pipeline: + if not arguments.exec_path: + raise ValueError("exec_path should be defined") + execution_path = arguments.exec_path + os.chdir(execution_path) + + else: + execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" + initialize_env(execution_path, PROJECT_NAME, execution_path) + os.chdir(execution_path) + run(f"git checkout {project.value.packaging_branch}") + + pr_branch = f"{project.value.packaging_branch}-{prj_ver}-{uuid.uuid4()}" + run(f"git checkout -b {pr_branch}") + exec_date = ( + datetime.strptime(arguments.date, "%Y.%m.%d %H:%M") + if arguments.date + else datetime.now().astimezone() + ) + is_tag(arguments.tag_name) + + fancy = arguments.fancy_ver_no > 1 + + package_properties = PackagePropertiesParams( + project=project, + project_version=prj_ver, + fancy=fancy, + fancy_version_number=arguments.fancy_ver_no, + name_surname=arguments.name, + microsoft_email=arguments.email, + changelog_date=exec_date, + changelog_entry=arguments.changelog_entry, + ) + update_all_changes(package_properties, execution_path) + + commit_message = f"Bump to {arguments.prj_name} {prj_ver}" + run(f'git commit -am "{commit_message}"') + + if not arguments.is_test: + run(f"git push --set-upstream origin {pr_branch}") + create_pr( + arguments.gh_token, + pr_branch, + commit_message, + REPO_OWNER, + PROJECT_NAME, + 
project.value.packaging_branch, + ) + if not arguments.is_test and not arguments.pipeline: + remove_cloned_code(execution_path) diff --git a/packaging_automation/update_pgxn.py b/packaging_automation/update_pgxn.py index 90bd1093..defb20c1 100644 --- a/packaging_automation/update_pgxn.py +++ b/packaging_automation/update_pgxn.py @@ -1,79 +1,79 @@ -import argparse -import os -import uuid - -import pathlib2 - -from .common_tool_methods import ( - process_template_file, - write_to_file, - run, - initialize_env, - create_pr, - remove_cloned_code, -) - -REPO_OWNER = "citusdata" -PROJECT_NAME = "packaging" -CHECKOUT_DIR = "pgxn_temp" -BASE_PATH = pathlib2.Path(__file__).parent.absolute() - - -def update_meta_json(project_version: str, template_path: str, exec_path: str): - content = process_template_file(project_version, template_path, "META.tmpl.json") - dest_file_name = f"{exec_path}/META.json" - write_to_file(content, dest_file_name) - - -def update_pkgvars(project_version: str, template_path: str, exec_path: str): - content = process_template_file(project_version, template_path, "pkgvars.tmpl") - dest_file_name = f"{exec_path}/pkgvars" - write_to_file(content, dest_file_name) - - -def update_pgxn_files(project_version: str, template_path: str, exec_path: str): - update_meta_json(project_version, template_path, exec_path) - update_pkgvars(project_version, template_path, exec_path) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--prj_ver", required=True) - parser.add_argument("--gh_token", required=True) - parser.add_argument("--pipeline", action="store_true") - parser.add_argument("--exec_path") - parser.add_argument("--is_test", action="store_true") - args = parser.parse_args() - - github_token = args.gh_token - main_branch = "pgxn-citus" - - if args.pipeline: - if not args.exec_path: - raise ValueError("exec_path should be defined") - execution_path = args.exec_path - os.chdir(execution_path) - else: - execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" - initialize_env(execution_path, PROJECT_NAME, execution_path) - os.chdir(execution_path) - run(f"git checkout {main_branch}") - - pr_branch = f"pgxn-citus-push-{args.prj_ver}-{uuid.uuid4()}" - run(f"git checkout -b {pr_branch}") - template_path = f"{BASE_PATH}/templates/pgxn" - update_pgxn_files(args.prj_ver, template_path, execution_path) - commit_message = f"Bump pgxn to version {args.prj_ver}" - run(f'git commit -a -m "{commit_message}"') - if not args.is_test: - run(f"git push --set-upstream origin {pr_branch}") - create_pr( - github_token, - pr_branch, - commit_message, - REPO_OWNER, - PROJECT_NAME, - main_branch, - ) - if not args.is_test and not args.pipeline: - remove_cloned_code(execution_path) +import argparse +import os +import uuid + +import pathlib2 + +from .common_tool_methods import ( + process_template_file, + write_to_file, + run, + initialize_env, + create_pr, + remove_cloned_code, +) + +REPO_OWNER = "citusdata" +PROJECT_NAME = "packaging" +CHECKOUT_DIR = "pgxn_temp" +BASE_PATH = pathlib2.Path(__file__).parent.absolute() + + +def update_meta_json(project_version: str, template_path: str, exec_path: str): + content = process_template_file(project_version, template_path, "META.tmpl.json") + dest_file_name = f"{exec_path}/META.json" + write_to_file(content, dest_file_name) + + +def update_pkgvars(project_version: str, template_path: str, exec_path: str): + content = process_template_file(project_version, template_path, "pkgvars.tmpl") + dest_file_name = f"{exec_path}/pkgvars" + 
write_to_file(content, dest_file_name) + + +def update_pgxn_files(project_version: str, template_path: str, exec_path: str): + update_meta_json(project_version, template_path, exec_path) + update_pkgvars(project_version, template_path, exec_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--prj_ver", required=True) + parser.add_argument("--gh_token", required=True) + parser.add_argument("--pipeline", action="store_true") + parser.add_argument("--exec_path") + parser.add_argument("--is_test", action="store_true") + args = parser.parse_args() + + github_token = args.gh_token + main_branch = "pgxn-citus" + + if args.pipeline: + if not args.exec_path: + raise ValueError("exec_path should be defined") + execution_path = args.exec_path + os.chdir(execution_path) + else: + execution_path = f"{os.getcwd()}/{CHECKOUT_DIR}" + initialize_env(execution_path, PROJECT_NAME, execution_path) + os.chdir(execution_path) + run(f"git checkout {main_branch}") + + pr_branch = f"pgxn-citus-push-{args.prj_ver}-{uuid.uuid4()}" + run(f"git checkout -b {pr_branch}") + template_path = f"{BASE_PATH}/templates/pgxn" + update_pgxn_files(args.prj_ver, template_path, execution_path) + commit_message = f"Bump pgxn to version {args.prj_ver}" + run(f'git commit -a -m "{commit_message}"') + if not args.is_test: + run(f"git push --set-upstream origin {pr_branch}") + create_pr( + github_token, + pr_branch, + commit_message, + REPO_OWNER, + PROJECT_NAME, + main_branch, + ) + if not args.is_test and not args.pipeline: + remove_cloned_code(execution_path) diff --git a/packaging_automation/upload_to_package_cloud.py b/packaging_automation/upload_to_package_cloud.py index db4be9b5..119880cc 100644 --- a/packaging_automation/upload_to_package_cloud.py +++ b/packaging_automation/upload_to_package_cloud.py @@ -1,184 +1,184 @@ -import argparse -import glob -import os -import urllib -from dataclasses import dataclass -from typing import List - -import pathlib2 -import requests -from requests.auth import HTTPBasicAuth - -supported_distros = { - "el/7": 140, - "el/8": 205, - "el/9": 240, - "ol/7": 146, - "ol/8": 234, - "ol/9": 244, - "debian/stretch": 149, - "debian/buster": 150, - "debian/bullseye": 207, - "debian/bookworm": 215, - "ubuntu/bionic": 190, - "ubuntu/focal": 210, - "ubuntu/jammy": 237, - "ubuntu/kinetic": 261, -} - -supported_repos = [ - "citus-bot/sample", - "citusdata/enterprise", - "citusdata/community", - "citusdata/community-nightlies", - "citusdata/enterprise-nightlies", - "citusdata/azure", -] - - -@dataclass -class ReturnValue: - success_status: bool - message: str - file_name: str - distro: str - repo: str - - -@dataclass -class MultipleReturnValue: - def __init__(self, ret_vals: List[ReturnValue]): - self.return_values = ret_vals - - multiple_return_value: List[ReturnValue] - - def success_status(self) -> bool: - return len([r for r in self.return_values if not r.success_status]) == 0 - - -BASE_PATH = pathlib2.Path(__file__).parents[1] - - -def upload_to_package_cloud( - distro_name, package_name, package_cloud_token, repo_name -) -> ReturnValue: - distro_id = supported_distros[distro_name] - with open(package_name, "rb") as file_handle: - files = { - "package[distro_version_id]": (None, str(distro_id)), - "package[package_file]": (package_name, file_handle), - } - - package_query_url = f"https://{package_cloud_token}:@packagecloud.io/api/v1/repos/{repo_name}/packages.json" - print(f"Uploading package {os.path.basename(package_name)}") - response = 
requests.post(package_query_url, files=files, timeout=60) - print(f"Response from package cloud: {response.content}") - return ReturnValue( - response.ok, - response.content.decode("ascii"), - package_name, - distro_name, - repo_name, - ) - - -def upload_files_in_directory_to_package_cloud( - directoryName: str, - distro_name: str, - package_cloud_token: str, - repo_name: str, - current_branch: str, - main_branch: str, -) -> MultipleReturnValue: - if not main_branch: - raise ValueError("main_branch should be defined") - if main_branch != current_branch: - print( - f"Package publishing skipped since current branch is not equal to {main_branch}" - ) - return MultipleReturnValue(ret_vals=[]) - - ret_status: List[ReturnValue] = [] - - files = glob.glob(f"{directoryName}/**/*.*", recursive=True) - - for file in files: - if file.endswith((".rpm", ".deb")): - ret_val = upload_to_package_cloud( - distro_name, file, package_cloud_token, repo_name - ) - ret_status.append(ret_val) - - return MultipleReturnValue(ret_status) - - -def delete_package_from_package_cloud( - package_cloud_token: str, - repo_owner: str, - repo_name: str, - distro_name: str, - distro_version: str, - package_name: str, -) -> ReturnValue: - delete_url = ( - f"https://{package_cloud_token}:@packagecloud.io/api/v1/repos/{repo_owner}/{repo_name}/" - f"{distro_name}/{distro_version}/{package_name}" - ) - - response = requests.delete(delete_url, timeout=60) - return ReturnValue( - response.ok, response.content, package_name, distro_name, repo_name - ) - - -def package_exists( - package_cloud_token: str, - repo_owner: str, - repo_name: str, - package_name: str, - platform: str, -) -> bool: - query_url = ( - f"https://packagecloud.io/api/v1/repos/{repo_owner}/{repo_name}/search?" - f"q={package_name}&filter=all&dist={urllib.parse.quote(platform, safe='')}" - ) - response = requests.get( - query_url, auth=HTTPBasicAuth(package_cloud_token, ""), timeout=60 - ) - return response.ok - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--platform", choices=supported_distros.keys()) - parser.add_argument("--package_cloud_api_token", required=True) - parser.add_argument("--repository_name", required=True, choices=supported_repos) - parser.add_argument("--output_file_path", required=True) - parser.add_argument("--current_branch", required=True) - parser.add_argument("--main_branch", required=True) - - args = parser.parse_args() - - multiple_return_value = upload_files_in_directory_to_package_cloud( - args.output_file_path, - args.platform, - args.package_cloud_api_token, - args.repository_name, - args.current_branch, - args.main_branch, - ) - print(multiple_return_value.success_status()) - print(multiple_return_value.return_values) - for rv in multiple_return_value.return_values: - if not rv.success_status: - print( - f"Error occured while uploading file on package cloud. Error: {rv.message} Distro: {rv.distro} " - f"File Name: {os.path.basename(rv.file_name)} Repo Name: {rv.repo}" - ) - else: - print( - f"File successfully uploaded. 
Distro: {rv.distro} File Name: {os.path.basename(rv.file_name)} " - f"Repo Name: {rv.repo}" - ) - if not multiple_return_value.success_status(): - raise ValueError("There were some errors while uploading some packages") +import argparse +import glob +import os +import urllib +from dataclasses import dataclass +from typing import List + +import pathlib2 +import requests +from requests.auth import HTTPBasicAuth + +supported_distros = { + "el/7": 140, + "el/8": 205, + "el/9": 240, + "ol/7": 146, + "ol/8": 234, + "ol/9": 244, + "debian/stretch": 149, + "debian/buster": 150, + "debian/bullseye": 207, + "debian/bookworm": 215, + "ubuntu/bionic": 190, + "ubuntu/focal": 210, + "ubuntu/jammy": 237, + "ubuntu/kinetic": 261, +} + +supported_repos = [ + "citus-bot/sample", + "citusdata/enterprise", + "citusdata/community", + "citusdata/community-nightlies", + "citusdata/enterprise-nightlies", + "citusdata/azure", +] + + +@dataclass +class ReturnValue: + success_status: bool + message: str + file_name: str + distro: str + repo: str + + +@dataclass +class MultipleReturnValue: + def __init__(self, ret_vals: List[ReturnValue]): + self.return_values = ret_vals + + multiple_return_value: List[ReturnValue] + + def success_status(self) -> bool: + return len([r for r in self.return_values if not r.success_status]) == 0 + + +BASE_PATH = pathlib2.Path(__file__).parents[1] + + +def upload_to_package_cloud( + distro_name, package_name, package_cloud_token, repo_name +) -> ReturnValue: + distro_id = supported_distros[distro_name] + with open(package_name, "rb") as file_handle: + files = { + "package[distro_version_id]": (None, str(distro_id)), + "package[package_file]": (package_name, file_handle), + } + + package_query_url = f"https://{package_cloud_token}:@packagecloud.io/api/v1/repos/{repo_name}/packages.json" + print(f"Uploading package {os.path.basename(package_name)}") + response = requests.post(package_query_url, files=files, timeout=60) + print(f"Response from package cloud: {response.content}") + return ReturnValue( + response.ok, + response.content.decode("ascii"), + package_name, + distro_name, + repo_name, + ) + + +def upload_files_in_directory_to_package_cloud( + directoryName: str, + distro_name: str, + package_cloud_token: str, + repo_name: str, + current_branch: str, + main_branch: str, +) -> MultipleReturnValue: + if not main_branch: + raise ValueError("main_branch should be defined") + if main_branch != current_branch: + print( + f"Package publishing skipped since current branch is not equal to {main_branch}" + ) + return MultipleReturnValue(ret_vals=[]) + + ret_status: List[ReturnValue] = [] + + files = glob.glob(f"{directoryName}/**/*.*", recursive=True) + + for file in files: + if file.endswith((".rpm", ".deb")): + ret_val = upload_to_package_cloud( + distro_name, file, package_cloud_token, repo_name + ) + ret_status.append(ret_val) + + return MultipleReturnValue(ret_status) + + +def delete_package_from_package_cloud( + package_cloud_token: str, + repo_owner: str, + repo_name: str, + distro_name: str, + distro_version: str, + package_name: str, +) -> ReturnValue: + delete_url = ( + f"https://{package_cloud_token}:@packagecloud.io/api/v1/repos/{repo_owner}/{repo_name}/" + f"{distro_name}/{distro_version}/{package_name}" + ) + + response = requests.delete(delete_url, timeout=60) + return ReturnValue( + response.ok, response.content, package_name, distro_name, repo_name + ) + + +def package_exists( + package_cloud_token: str, + repo_owner: str, + repo_name: str, + package_name: str, + platform: 
str, +) -> bool: + query_url = ( + f"https://packagecloud.io/api/v1/repos/{repo_owner}/{repo_name}/search?" + f"q={package_name}&filter=all&dist={urllib.parse.quote(platform, safe='')}" + ) + response = requests.get( + query_url, auth=HTTPBasicAuth(package_cloud_token, ""), timeout=60 + ) + return response.ok + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--platform", choices=supported_distros.keys()) + parser.add_argument("--package_cloud_api_token", required=True) + parser.add_argument("--repository_name", required=True, choices=supported_repos) + parser.add_argument("--output_file_path", required=True) + parser.add_argument("--current_branch", required=True) + parser.add_argument("--main_branch", required=True) + + args = parser.parse_args() + + multiple_return_value = upload_files_in_directory_to_package_cloud( + args.output_file_path, + args.platform, + args.package_cloud_api_token, + args.repository_name, + args.current_branch, + args.main_branch, + ) + print(multiple_return_value.success_status()) + print(multiple_return_value.return_values) + for rv in multiple_return_value.return_values: + if not rv.success_status: + print( + f"Error occured while uploading file on package cloud. Error: {rv.message} Distro: {rv.distro} " + f"File Name: {os.path.basename(rv.file_name)} Repo Name: {rv.repo}" + ) + else: + print( + f"File successfully uploaded. Distro: {rv.distro} File Name: {os.path.basename(rv.file_name)} " + f"Repo Name: {rv.repo}" + ) + if not multiple_return_value.success_status(): + raise ValueError("There were some errors while uploading some packages") diff --git a/packaging_automation/validate_build_output.py b/packaging_automation/validate_build_output.py index fdccd88f..e5bb8e43 100644 --- a/packaging_automation/validate_build_output.py +++ b/packaging_automation/validate_build_output.py @@ -1,17 +1,17 @@ -import argparse - -from .packaging_warning_handler import validate_output -from .common_tool_methods import PackageType -from pathlib import Path - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--output_file", required=True) - parser.add_argument("--ignore_file", required=True) - parser.add_argument( - "--package_type", choices=[p.name for p in PackageType], required=True - ) - - args = parser.parse_args() - build_output = Path(args.output_file).read_text(encoding="utf-8") - validate_output(build_output, args.ignore_file, PackageType[args.package_type]) +import argparse + +from .packaging_warning_handler import validate_output +from .common_tool_methods import PackageType +from pathlib import Path + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--output_file", required=True) + parser.add_argument("--ignore_file", required=True) + parser.add_argument( + "--package_type", choices=[p.name for p in PackageType], required=True + ) + + args = parser.parse_args() + build_output = Path(args.output_file).read_text(encoding="utf-8") + validate_output(build_output, args.ignore_file, PackageType[args.package_type]) diff --git a/packaging_automation/write_postgres_versions_into_file.py b/packaging_automation/write_postgres_versions_into_file.py index a696a9b8..a0861e51 100644 --- a/packaging_automation/write_postgres_versions_into_file.py +++ b/packaging_automation/write_postgres_versions_into_file.py @@ -1,11 +1,11 @@ -import argparse - -from .citus_package import write_postgres_versions_into_file - -if __name__ == "__main__": - parser = 
argparse.ArgumentParser() - parser.add_argument("--project_version", required=True) - parser.add_argument("--input_files_dir", required=True) - - args = parser.parse_args() - write_postgres_versions_into_file(args.input_files_dir, args.project_version) +import argparse + +from .citus_package import write_postgres_versions_into_file + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_version", required=True) + parser.add_argument("--input_files_dir", required=True) + + args = parser.parse_args() + write_postgres_versions_into_file(args.input_files_dir, args.project_version)
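
Note: the changelog header format enforced by is_project_changelog_header earlier in this patch can be hard to read from the regular expression alone. The following is a minimal illustrative sketch, not part of the patch itself; it only reuses the pattern and the example headers already quoted in the source comments and error message, so any other header strings shown are hypothetical.

    import re

    # Same pattern as in is_project_changelog_header; the first two headers are the
    # examples quoted in the source, the third is a deliberately invalid variant.
    HEADER_PATTERN = r"^### \w+[-]?\w+\sv\d+\.\d+\.\d+\s\(\w+\s\d+,\s\d+\)\s###$"

    for header in (
        "### citus v8.3.3 (March 23, 2021) ###",
        "### citus-enterprise v10.1.0 (July 14, 2021) ###",
        "### citus v8.3.3 ###",  # missing release date, so validation fails
    ):
        print(header, "->", bool(re.match(HEADER_PATTERN, header)))

Running the sketch prints True for the first two headers and False for the last, which matches the error message raised by the validator when the header format is invalid.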