diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..a199226df
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug report
+about: Create a bug report or request for help
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**Versions**
+Details of your environment, including:
+ - Tableau Server version (or note if using Tableau Online)
+ - Python version
+ - TSC library version
+
+**To Reproduce**
+Steps to reproduce the behavior. Please include a code snippet where possible.
+
+**Results**
+What are the results or error messages received?
+
+**NOTE:** Be careful not to post user names, passwords, auth tokens or any other private or sensitive information.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..b7a7a926d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,28 @@
+---
+name: Feature Request
+title: "[REQUEST TYPE] [FEATURE TITLE]"
+about: Suggest a feature that could be added to the client
+labels: enhancement, needs investigation
+---
+
+## Summary
+A one line description of the request. Skip this if the title is already a good summary.
+
+
+## Request Type
+If you know, say which of these types your request is in the title, and follow the suggestions for that type when writing your description.
+
+****Type 1: support a REST API:****
+If it is functionality that already exists in the [REST API](https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref.htm), example API calls are the clearest way to explain your request.
+
+****Type 2: add a REST API and support it in tsc.****
+If it is functionality that can be achieved somehow on Tableau Server but not through the REST API, describe the current way to do it. (e.g: functionality that is available in the Web UI, or by using the Hyper API). For UI, screenshots can be helpful.
+
+****Type 3: new functionality****
+Requests for totally new functionality will generally be passed to the relevant dev team, but we probably can't give any useful estimate of how or when it might be implemented. If it is a feature that is 'about' the API or programmable access, here might be the best place to suggest it, but generally feature requests will be more visible in the [Tableau Community Ideas](https://community.tableau.com/s/ideas) forum and should go there instead.
+
+
+## Description
+A clear and concise description of what the feature request is. If you think that the value of this feature might not be obvious, include information like how often it is needed, amount of work saved, etc. If your feature request is related to a file or server in a specific state, describe the starting state when the feature can be used, and the end state after using it. If it involves modifying files, an example file may be helpful.
+![](https://img.shields.io/badge/warning-Be%20careful%20not%20to%20post%20user%20names%2C%20passwords%2C%20auth%20tokens%20or%20any%20other%20private%20or%20sensitive%20information-red) + diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml new file mode 100644 index 000000000..70bc845e9 --- /dev/null +++ b/.github/workflows/code-coverage.yml @@ -0,0 +1,39 @@ +name: Check Test Coverage + +on: + pull_request: + branches: + - development + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: ['3.10'] + + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} on ${{ matrix.os }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e .[test] + + # https://github.com/marketplace/actions/pytest-coverage-comment + - name: Generate coverage report + run: pytest --junitxml=pytest.xml --cov=tableauserverclient test/ | tee pytest-coverage.txt + + - name: Comment on pull request with coverage + continue-on-error: true + uses: MishaKav/pytest-coverage-comment@main + with: + pytest-coverage-path: ./pytest-coverage.txt diff --git a/.github/workflows/meta-checks.yml b/.github/workflows/meta-checks.yml new file mode 100644 index 000000000..0e2b425ee --- /dev/null +++ b/.github/workflows/meta-checks.yml @@ -0,0 +1,49 @@ +name: types and style checks + +on: [push, pull_request] + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ['3.10'] + + runs-on: ${{ matrix.os }} + + steps: + - name: Get pip cache dir + id: pip-cache + shell: bash + run: | + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + + - name: cache + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('pyproject.toml') }} + restore-keys: | + ${{ runner.os }}-${{ matrix.python-version }}-pip- + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} on ${{ matrix.os }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e .[test] + + - name: Format with black + run: | + black --check --line-length 120 tableauserverclient samples test + + - name: Run Mypy tests + if: always() + run: | + mypy --show-error-codes --disable-error-code misc --disable-error-code import --implicit-optional tableauserverclient test diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 000000000..cae0f409c --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,40 @@ +name: Publish to PyPi + +# This will publish a package to TestPyPi (and real Pypi if run on master) with a version +# number generated by versioneer from the most recent tag looking like v____ +# TODO: maybe move this into the package job so all release-based actions are together +on: + workflow_dispatch: + push: + tags: + - 'v*.*.*' + +jobs: + build-n-publish: + name: Build dist files for PyPi + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: actions/setup-python@v5 + with: + python-version: 3.9 + - name: Build dist files + run: | + python -m pip install --upgrade pip + pip install -e .[test] build + 
python -m build + git describe --tag --dirty --always + + - name: Publish distribution 📦 to Test PyPI # always run + uses: pypa/gh-action-pypi-publish@release/v1 # license BSD-2 + with: + password: ${{ secrets.TEST_PYPI_API_TOKEN }} + repository_url: https://test.pypi.org/legacy/ + + - name: Publish distribution 📦 to PyPI + if: ${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/v') }} + uses: pypa/gh-action-pypi-publish@release/v1 # license BSD-2 + with: + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/pypi-smoke-tests.yml b/.github/workflows/pypi-smoke-tests.yml new file mode 100644 index 000000000..45ea94400 --- /dev/null +++ b/.github/workflows/pypi-smoke-tests.yml @@ -0,0 +1,36 @@ +# This workflow will install TSC from pypi and validate that it runs. For more information see: +# https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Pypi smoke tests + +on: + workflow_dispatch: + schedule: + - cron: 0 11 * * * # Every day at 11AM UTC (7AM EST) + +permissions: + contents: read + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ['3.x'] + + runs-on: ${{ matrix.os }} + + steps: + - name: Set up Python ${{ matrix.python-version }} on ${{ matrix.os }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: pip install + run: | + pip uninstall tableauserverclient + pip install tableauserverclient + - name: Launch app + run: | + python -c "import tableauserverclient as TSC + server = TSC.Server('http://example.com', use_server_version=False)" diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml new file mode 100644 index 000000000..2e197cf20 --- /dev/null +++ b/.github/workflows/run-tests.yml @@ -0,0 +1,55 @@ +name: Python tests + +on: + pull_request: {} + push: + branches: + - development + - master + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + + runs-on: ${{ matrix.os }} + + steps: + - name: Get pip cache dir + id: pip-cache + shell: bash + run: | + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + + - name: cache + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('pyproject.toml') }} + restore-keys: | + ${{ runner.os }}-${{ matrix.python-version }}-pip- + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} on ${{ matrix.os }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e .[test] build + + - name: Test with pytest + if: always() + run: | + pytest test + + - name: Test build + if: always() + run: | + python -m build diff --git a/.github/workflows/slack.yml b/.github/workflows/slack.yml new file mode 100644 index 000000000..2ecb0be7f --- /dev/null +++ b/.github/workflows/slack.yml @@ -0,0 +1,20 @@ +name: 💬 Send Message to Slack 🚀 + +on: [push, pull_request, issues] + +jobs: + slack-notifications: + continue-on-error: true + runs-on: ubuntu-20.04 + name: Sends a message to Slack when a push, a pull request or an issue is made + steps: + - name: Send message to Slack API + continue-on-error: true + uses: archive/github-actions-slack@v2.8.0 + id: notify + with: + 
slack-bot-user-oauth-access-token: ${{ secrets.SLACK_BOT_USER_OAUTH_ACCESS_TOKEN }} + slack-channel: C019HCX84L9 + slack-text: Hello! Event "${{ github.event_name }}" in "${{ github.repository }}" 🤓 + - name: Result from "Send Message" + run: echo "The result was ${{ steps.notify.outputs.slack-result }}" diff --git a/.gitignore b/.gitignore index 5f5db36d7..b3b3ff80f 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ var/ *.egg-info/ .installed.cfg *.egg +pip-wheel-metadata/ # PyInstaller # Usually these files are written by a python script from a template @@ -76,15 +77,20 @@ target/ # pyenv .python-version +# poetry +poetry.lock + # celery beat schedule file celerybeat-schedule # dotenv .env +env.py # virtualenv venv/ ENV/ +.venv/ # Spyder project settings .spyderproject @@ -92,7 +98,8 @@ ENV/ # Rope project settings .ropeproject - +# VSCode project settings +.vscode/ # macOS.gitignore from https://github.com/github/gitignore *.DS_Store @@ -148,3 +155,5 @@ $RECYCLE.BIN/ docs/_site/ docs/.jekyll-metadata docs/Gemfile.lock +samples/credentials +.venv/ diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 01ad30886..000000000 --- a/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: python -python: - - "2.7" - - "3.3" - - "3.4" - - "3.5" - - "3.6" - - "pypy" -# command to install dependencies -install: - - "pip install -e ." - - "pip install pycodestyle" -# command to run tests -script: - # Tests - - python setup.py test - # pep8 - disabled for now until we can scrub the files to make sure we pass before turning it on - - pycodestyle tableauserverclient test samples diff --git a/CHANGELOG.md b/CHANGELOG.md index 77aab3ed7..c018294d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,157 @@ + +## 0.18.0 (6 April 2022) +* Switched to using defused_xml for xml attack protection +* added linting and type hints +* improve experience with self-signed certificates/invalid ssl +* updated samples +* new item types: metrics, revisions for datasources and workbooks +* features: support adding flows to schedules, exporting workbooks to powerpoint +* fixes: delete extracts + +## 0.17.0 (20 October 2021) +* Added support for accepting parameters for post request of the metadata api (#850) +* Fixed jobs.get_by_id(job_id) example & reference docs (#867, #868) +* Fixed handling for workbooks in personal spaces which do not have projectID or Name (#875) +* Updated links to Data Source Methods page in REST API docs (#879) +* Unified arguments of sample scripts (#889) +* Updated docs for - links to Datasource API (#879) , sample scripts (#892) & metadata query (#896) +* Added support for scheduling DataUpdate Jobs (#891) +* Exposed the fileuploads API endpoint (#894) +* Added a new sample & documentation for metadata API (#895, #896) +* Added support to the package for getting flow run status, as well as the ability to cancel flow runs. 
(#884)
+* Added jobs.wait_for_job method (#903)
+* Added description support for datasources item (#912)
+* Dropped support for Python 3.5 (#911)
+
+## 0.16.0 (15 July 2021)
+* Documentation updates (#800, #818, #839, #842)
+* Fixed data alert repr in subscription item (#821)
+* Added support for Data Quality Warning (#836)
+* Added support for renaming datasources (#843)
+* Improved Datasource tests (#843)
+* Updated catalog obfuscation field (#844)
+* Fixed revision limit field in site_item.py file (#847)
+* Added the Missing content permission field- LockedToProjectWithoutNested (#856)
+
+## 0.15.0 (16 Feb 2021)
+* Added support for python version 3.9 (#744)
+* Added support for 'Get View by ID' (#750)
+* Added docs and test data to MANIFEST.in file (#780)
+* Added owner_id property to ProjectItem (#784)
+* Added support for skipping connection check while publishing workbook (#791)
+* Added support for 'Update Subscription' (#794)
+* Added support for 'Get Groups for a User' (#799)
+* Improved debug logging by including put/post request contents (#743)
+* Improved local and active-directory group creation (#770)
+* Improved 'Update Group' to match server requests/responses (#772)
+* Improved SiteItem with new properties and functions (#777)
+* Improved SubscriptionItem with new properties (#794)
+* Improved the 'type' property of TaskItem to convert server response to enum (#796)
+* Improved repository to use Github Actions for running tests/linter (#798)
+* Fixed data_acceleration field causing error in workbook update payload (#741)
+
+## 0.14.1 (9 Dec 2020)
+* Fixed filter query issue for server version below 2020.1 (#745)
+* Fixed large workbook/datasource publish issue (#757)
+
+## 0.14.0 (6 Nov 2020)
+* Added django-style filtering and sorting (#615)
+* Added encoding tag-name before deleting (#687)
+* Added 'Execute' Capability to permissions (#700)
+* Added support for publishing workbook using file objects (#704)
+* Added new fields to datasource_item (#705)
+* Added all fields for users.get to get email and fullname (#713)
+* Added support publishing datasource using file objects (#714)
+* Improved request options by removing manual query param generation (#686)
+* Improved publish_workbook sample to take in site (#694)
+* Improved schedules.update() by removing constraint that required an interval (#711)
+* Fixed site update/create not checking booleans properly (#723)
+
+## 0.13 (1 Sept 2020)
+* Added notes field to JobItem (#571)
+* Added webpage_url field to WorkbookItem (#661)
+* Added support for switching between sites (#655)
+* Added support for querying favorites for a user (#656)
+* Added support for Python 3.8 (#659)
+* Added support for Data Alerts (#667)
+* Added support for basic Extract operations - Create, Delete, en/re/decrypt for site (#672)
+* Added support for creating and querying Active Directory groups (#674)
+* Added support for asynchronously updating a group (#674)
+* Improved handling of invalid dates (#529)
+* Improved consistency of update_permission endpoints (#668)
+* Documentation updates (#658, #669, #670, #673, #683)
+
+## 0.12.1 (22 July 2020)
+
+* Fixed login.py sample to properly handle sitename (#652)
+
+## 0.12 (10 July 2020)
+
+* Added hidden_views parameter to workbook publish method (#614)
+* Added simple paging endpoint for GraphQL/Metadata API (#623)
+* Added endpoints to Metadata API for retrieving backfill/eventing status (#626)
+* Added maxage parameter to CSV and PDF export options (#635)
+* Added support for querying, adding, and deleting favorites (#638)
+* Added a sample for publishing datasources (#644)
+
+## 0.11 (1 May 2020)
+
+* Added more fields to Data Acceleration config (#588)
+* Added OpenID as an auth setting enum (#610)
+* Added support for Data Acceleration Reports (#596)
+* Added support for view permissions (#526)
+* Materialized views changed to Data Acceleration (#576)
+* Improved consistency across workbook/datasource endpoints (#570)
+* Fixed print error in update_connection.py (#602)
+* Fixed log error in add user endpoint (#608)
+
+## 0.10 (21 Feb 2020)
+
+* Added a way to handle non-xml errors (#515)
+* Added Webhooks endpoints for create, delete, get, list, and test (#523, #532)
+* Added delete method in the tasks endpoint (#524)
+* Added description attribute to WorkbookItem (#533)
+* Added support for materializeViews as schedule and task types (#542)
+* Added warnings to schedules (#550, #551)
+* Added ability to update parent_id attribute of projects (#560, #567)
+* Improved filename behavior for download endpoints (#517)
+* Improved logging (#508)
+* Fixed runtime error in permissions endpoint (#513)
+* Fixed move_workbook_sites sample (#503)
+* Fixed project permissions endpoints (#527)
+* Fixed login.py sample to accept site name (#549)
+
+## 0.9 (4 Oct 2019)
+
+* Added Metadata API endpoints (#431)
+* Added site settings for Data Catalog and Prep Conductor (#434)
+* Added new fields to ViewItem (#331)
+* Added support and samples for Tableau Server Personal Access Tokens (#465)
+* Added Permissions endpoints (#429)
+* Added tags to ViewItem (#470)
+* Added Databases and Tables endpoints (#445)
+* Added Flow endpoints (#494)
+* Added ability to filter projects by topLevelProject attribute (#497)
+* Improved server_info endpoint error handling (#439)
+* Improved Pager to take in keyword arguments (#451)
+* Fixed UUID serialization error while publishing workbook (#449)
+* Fixed materalized views in request body for update_workbook (#461)
+
+## 0.8.1 (17 July 2019)
+
+* Fixed update_workbook endpoint (#454)
+
+## 0.8 (8 Apr 2019)
+
+* Added Max Age to download view image request (#360)
+* Added Materialized Views (#378, #394, #396)
+* Added PDF export of Workbook (#376)
+* Added Support User Role (#392)
+* Added Flows (#403)
+* Updated Pager to handle un-paged results (#322)
+* Fixed checked upload (#309, #319, #326, #329)
+* Fixed embed_password field on publish (#416)
+
 ## 0.7 (2 Jul 2018)
 
 * Added cancel job (#299)
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 25ac5718b..a69cfff21 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -4,26 +4,79 @@ The following people have contributed to this project to make it possible, and w
 
 ## Contributors
 
+* [jacalata](https://github.com/jacalata)
+* [jorwoods](https://github.com/jorwoods)
+* [t8y8](https://github.com/t8y8)
+* [bcantoni](https://github.com/bcantoni)
+* [shinchris](https://github.com/shinchris)
+* [vogelsgesang](https://github.com/vogelsgesang)
+* [lbrendanl](https://github.com/lbrendanl)
+* [LGraber](https://github.com/LGraber)
+* [gaoang2148](https://github.com/gaoang2148)
+* [benlower](https://github.com/benlower)
+* [liu-rebecca](https://github.com/liu-rebecca)
+* [guodah](https://github.com/guodah)
+* [jdomingu](https://github.com/jdomingu)
+* [kykrueger](https://github.com/kykrueger)
+* [jz-huang](https://github.com/jz-huang)
+* [opus-42](https://github.com/opus-42)
+* [markm-io](https://github.com/markm-io)
+* [graysonarts](https://github.com/graysonarts)
+* [d45](https://github.com/d45)
+* [preguraman](https://github.com/preguraman)
+* [sotnich](https://github.com/sotnich)
+* [mmuttreja-tableau](https://github.com/mmuttreja-tableau)
+* [dependabot[bot]](https://github.com/apps/dependabot)
+* [scuml](https://github.com/scuml)
+* [ovinis](https://github.com/ovinis)
+* [FFMMM](https://github.com/FFMMM)
+* [martinbpeters](https://github.com/martinbpeters)
+* [talvalin](https://github.com/talvalin)
+* [dzucker-tab](https://github.com/dzucker-tab)
+* [a-torres-2](https://github.com/a-torres-2)
+* [nnevalainen](https://github.com/nnevalainen)
+* [mbren](https://github.com/mbren)
+* [wolkiewiczk](https://github.com/wolkiewiczk)
+* [jacobj10](https://github.com/jacobj10)
+* [hugoboos](https://github.com/hugoboos)
+* [grbritz](https://github.com/grbritz)
+* [fpagliar](https://github.com/fpagliar)
+* [bskim45](https://github.com/bskim45)
+* [baixin137](https://github.com/baixin137)
+* [jessicachen79](https://github.com/jessicachen79)
+* [gconklin](https://github.com/gconklin)
 * [geordielad](https://github.com/geordielad)
-* [Hugo Stijns](https://github.com/hugoboos)
-* [kovner](https://github.com/kovner)
-* [Talvalin](https://github.com/Talvalin)
-* [Chris Toomey](https://github.com/cmtoomey)
-* [Vathsala Achar](https://github.com/VathsalaAchar)
-* [Graeme Britz](https://github.com/grbritz)
-* [Russ Goldin](https://github.com/tagyoureit)
-* [William Lang](https://github.com/williamlang)
-* [Jim Morris](https://github.com/jimbodriven)
-* [BingoDinkus](https://github.com/BingoDinkus)
-* [Sergey Sotnichenko](https://github.com/sotnich)
-
-## Core Team
-
-* [Shin Chris](https://github.com/shinchris)
-* [Lee Graber](https://github.com/lgraber)
-* [Tyler Doyle](https://github.com/t8y8)
-* [Russell Hay](https://github.com/RussTheAerialist)
-* [Ben Lower](https://github.com/benlower)
-* [Jared Dominguez](https://github.com/jdomingu)
-* [Jackson Huang](https://github.com/jz-huang)
-* [Brendan Lee](https://github.com/lbrendanl)
+* [fossabot](https://github.com/fossabot)
+* [daniel1608](https://github.com/daniel1608)
+* [annematronic](https://github.com/annematronic)
+* [rshide](https://github.com/rshide)
+* [VathsalaAchar](https://github.com/VathsalaAchar)
+* [TrimPeachu](https://github.com/TrimPeachu)
+* [ajbosco](https://github.com/ajbosco)
+* [jimbodriven](https://github.com/jimbodriven)
+* [ltiffanydev](https://github.com/ltiffanydev)
+* [martydertz](https://github.com/martydertz)
+* [r-richmond](https://github.com/r-richmond)
+* [sfarr15](https://github.com/sfarr15)
+* [tagyoureit](https://github.com/tagyoureit)
+* [tjones-commits](https://github.com/tjones-commits)
+* [yoshichan5](https://github.com/yoshichan5)
+* [wlodi83](https://github.com/wlodi83)
+* [anipmehta](https://github.com/anipmehta)
+* [cmtoomey](https://github.com/cmtoomey)
+* [pes-magic](https://github.com/pes-magic)
+* [illonage](https://github.com/illonage)
+* [jayvdb](https://github.com/jayvdb)
+* [jorgeFons](https://github.com/jorgeFons)
+* [Kovner](https://github.com/Kovner)
+* [LarsBreddemann](https://github.com/LarsBreddemann)
+* [lboynton](https://github.com/lboynton)
+* [maddy-at-leisure](https://github.com/maddy-at-leisure)
+* [narcolino-tableau](https://github.com/narcolino-tableau)
+* [PatrickfBraz](https://github.com/PatrickfBraz)
+* [paulvic](https://github.com/paulvic)
+* [shrmnk](https://github.com/shrmnk)
+* [TableauKyle](https://github.com/TableauKyle)
+* [bossenti](https://github.com/bossenti)
+* [ma7tcsp](https://github.com/ma7tcsp)
+* [toomyem](https://github.com/toomyem)
diff --git a/LICENSE b/LICENSE
index 6222b2e80..22f90640f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2016 Tableau +Copyright (c) 2022 Tableau Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MANIFEST.in b/MANIFEST.in index ae0a2ec7d..9b7512fb9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,22 @@ -include versioneer.py -include tableauserverclient/_version.py +include CHANGELOG.md +include contributing.md +include CONTRIBUTORS.md include LICENSE include LICENSE.versioneer +include README.md +include tableauserverclient/_version.py +include versioneer.py +recursive-include docs *.md +recursive-include samples *.py +recursive-include samples *.txt +recursive-include test *.csv +recursive-include test *.dict +recursive-include test *.hyper +recursive-include test *.json +recursive-include test *.pdf +recursive-include test *.png +recursive-include test *.py +recursive-include test *.xml +recursive-include test *.tde +global-include *.pyi +global-include *.typed diff --git a/README.md b/README.md index 51e23549a..5c80f337e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # Tableau Server Client (Python) -[![Tableau Supported](https://img.shields.io/badge/Support%20Level-Tableau%20Supported-53bd92.svg)](https://www.tableau.com/support-levels-it-and-developer-tools) + +[![Tableau Supported](https://img.shields.io/badge/Support%20Level-Tableau%20Supported-53bd92.svg)](https://www.tableau.com/support-levels-it-and-developer-tools) [![Build Status](https://github.com/tableau/server-client-python/actions/workflows/run-tests.yml/badge.svg)](https://github.com/tableau/server-client-python/actions) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Ftableau%2Fserver-client-python.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Ftableau%2Fserver-client-python?ref=badge_shield) Use the Tableau Server Client (TSC) library to increase your productivity as you interact with the Tableau Server REST API. With the TSC library you can do almost everything that you can do with the REST API, including: @@ -7,8 +9,14 @@ Use the Tableau Server Client (TSC) library to increase your productivity as you * Create users and groups. * Query projects, sites, and more. -This repository contains Python source code and sample files. +This repository contains Python source code for the library and sample files showing how to use it. As of September 2024, support for Python 3.7 and 3.8 will be dropped - support for older versions of Python aims to match https://devguide.python.org/versions/ -For more information on installing and using TSC, see the documentation: +To see sample code that works directly with the REST API (in Java, Python, or Postman), visit the [REST API Samples](https://github.com/tableau/rest-api-samples) repo. +For more information on installing and using TSC, see the documentation: + +To contribute, see our [Developer Guide](https://tableau.github.io/server-client-python/docs/dev-guide). A list of all our contributors to date is in [CONTRIBUTORS.md]. 
+ +## License +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Ftableau%2Fserver-client-python.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Ftableau%2Fserver-client-python?ref=badge_large) diff --git a/contributing.md b/contributing.md index c95191e0e..a0132919f 100644 --- a/contributing.md +++ b/contributing.md @@ -10,12 +10,9 @@ Contribution can include, but are not limited to, any of the following: * Fix an Issue/Bug * Add/Fix documentation -Contributions must follow the guidelines outlined on the [Tableau Organization](http://tableau.github.io/) page, though filing an issue or requesting -a feature do not require the CLA. - ## Issues and Feature Requests -To submit an issue/bug report, or to request a feature, please submit a [github issue](https://github.com/tableau/server-client-python/issues) to the repo. +To submit an issue/bug report, or to request a feature, please submit a [GitHub issue](https://github.com/tableau/server-client-python/issues) to the repo. If you are submitting a bug report, please provide as much information as you can, including clear and concise repro steps, attaching any necessary files to assist in the repro. **Be sure to scrub the files of any potentially sensitive information. Issues are public.** @@ -23,33 +20,6 @@ files to assist in the repro. **Be sure to scrub the files of any potentially s For a feature request, please try to describe the scenario you are trying to accomplish that requires the feature. This will help us understand the limitations that you are running into, and provide us with a use case to know if we've satisfied your request. -### Label usage on Issues - -The core team is responsible for assigning most labels to the issue. Labels -are used for prioritizing the core team's work, and use the following -definitions for labels. - -The following labels are only to be set or changed by the core team: - -* **bug** - A bug is an unintended behavior for existing functionality. It only relates to existing functionality and the behavior that is expected with that functionality. We do not use **bug** to indicate priority. -* **enhancement** - An enhancement is a new piece of functionality and is related to the fact that new code will need to be written in order to close this issue. We do not use **enhancement** to indicate priority. -* **CLARequired** - This label is used to indicate that the contribution will require that the CLA is signed before we can accept a PR. This label should not be used on Issues -* **CLANotRequired** - This label is used to indicate that the contribution does not require a CLA to be signed. This is used for minor fixes and usually around doc fixes or correcting strings. -* **help wanted** - This label on an issue indicates it's a good choice for external contributors to take on. It usually means it's an issue that can be tackled by first time contributors. - -The following labels can be used by the issue creator or anyone in the -community to help us prioritize enhancement and bug fixes that are -causing pain from our users. The short of it is, purple tags are ones that -anyone can add to an issue: - -* **Critical** - This means that you won't be able to use the library until the issues have been resolved. If an issue is already labeled as critical, but you want to show your support for it, add a +1 comment to the issue. This helps us know what issues are really impacting our users. 
-* **Nice To Have** - This means that the issue doesn't block your usage of the library, but would make your life easier. Like with critical, if the issue is already tagged with this, but you want to show your support, add a +1 comment to the issue. - -## Fixes, Implementations, and Documentation - -For all other things, please submit a PR that includes the fix, documentation, or new code that you are trying to contribute. More information on -creating a PR can be found in the [Development Guide](https://tableau.github.io/server-client-python/docs/dev-guide) +### Making Contributions -If the feature is complex or has multiple solutions that could be equally appropriate approaches, it would be helpful to file an issue to discuss the -design trade-offs of each solution before implementing, to allow us to collectively arrive at the best solution, which most likely exists in the middle -somewhere. +Refer to the [Developer Guide](https://tableau.github.io/server-client-python/docs/dev-guide) which explains how to make contributions to the TSC project. diff --git a/docs/Gemfile b/docs/Gemfile deleted file mode 100644 index 775d954bf..000000000 --- a/docs/Gemfile +++ /dev/null @@ -1,3 +0,0 @@ -source 'https://rubygems.org' -gem 'github-pages', group: :jekyll_plugins - diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..0700899ab --- /dev/null +++ b/docs/README.md @@ -0,0 +1,3 @@ +To view the documentation source for the Tableau Server Client library, find the `doc` folder in the [`gh-pages`](https://github.com/tableau/server-client-python/tree/gh-pages/docs) branch of this repo. + +For more info about contributing, see the [Developer Guide](https://tableau.github.io/server-client-python/docs/dev-guide#update-the-documentation) page. diff --git a/docs/_config.yml b/docs/_config.yml deleted file mode 100644 index 5ea15f228..000000000 --- a/docs/_config.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Site settings -title: Tableau Server Client Library (Python) -email: github@tableau.com -description: Simplify interactions with the Tableau Server REST API. -baseurl: "/server-client-python" -permalinks: pretty -defaults: - - - scope: - path: "" # Apply to all files - values: - layout: "default" - -# Build settings -markdown: kramdown -highlighter: rouge - diff --git a/docs/_includes/analytics.html b/docs/_includes/analytics.html deleted file mode 100644 index 0cdbad25d..000000000 --- a/docs/_includes/analytics.html +++ /dev/null @@ -1,7 +0,0 @@ - - - diff --git a/docs/_includes/docs_menu.html b/docs/_includes/docs_menu.html deleted file mode 100644 index 104a1f5b3..000000000 --- a/docs/_includes/docs_menu.html +++ /dev/null @@ -1,73 +0,0 @@ -
- {% include search_form.html %}
-
-
diff --git a/docs/_includes/footer.html b/docs/_includes/footer.html deleted file mode 100644 index 486c81d22..000000000 --- a/docs/_includes/footer.html +++ /dev/null @@ -1,8 +0,0 @@ - - diff --git a/docs/_includes/head.html b/docs/_includes/head.html deleted file mode 100644 index 083e3f268..000000000 --- a/docs/_includes/head.html +++ /dev/null @@ -1,18 +0,0 @@ - - - - - {% if page.title %}{{ page.title | escape }}{% else %}{{ site.title | escape }}{% endif %} - - - - - - - - - - - - -{% if jekyll.environment == "production" %}{% include analytics.html %}{% endif %} diff --git a/docs/_includes/header.html b/docs/_includes/header.html deleted file mode 100644 index 106578dfc..000000000 --- a/docs/_includes/header.html +++ /dev/null @@ -1,29 +0,0 @@ - diff --git a/docs/_includes/icon-github.svg b/docs/_includes/icon-github.svg deleted file mode 100644 index 4422c4f5d..000000000 --- a/docs/_includes/icon-github.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/_includes/search_form.html b/docs/_includes/search_form.html deleted file mode 100644 index 41bb34259..000000000 --- a/docs/_includes/search_form.html +++ /dev/null @@ -1,7 +0,0 @@ -
- -
- diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html deleted file mode 100644 index 38ee020bb..000000000 --- a/docs/_layouts/default.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - {% include head.html %} - - - -
- {% include header.html %} - - {% include footer.html %} -
- - - diff --git a/docs/_layouts/docs.html b/docs/_layouts/docs.html deleted file mode 100644 index 5355f63df..000000000 --- a/docs/_layouts/docs.html +++ /dev/null @@ -1,31 +0,0 @@ ---- -layout: docs ---- - - - - - - {% include head.html %} - - - -
- {% include header.html %} - {% include docs_menu.html %} - -
-

{{ page.title }}

- -
- {{ content }} - {% include footer.html %} -
-
- - - diff --git a/docs/_layouts/home.html b/docs/_layouts/home.html deleted file mode 100644 index c2cf32fcb..000000000 --- a/docs/_layouts/home.html +++ /dev/null @@ -1,19 +0,0 @@ ---- -layout: home ---- - - - - - {% include head.html %} - - - -
- {% include header.html %} - {{ content }} - {% include footer.html %} -
- - - diff --git a/docs/_layouts/search.html b/docs/_layouts/search.html deleted file mode 100644 index 96dbd94a1..000000000 --- a/docs/_layouts/search.html +++ /dev/null @@ -1,43 +0,0 @@ ---- -layout: search ---- - - - - - - {% include head.html %} - - - - - - - -
- {% include header.html %} - {% include docs_menu.html %} - -
-

-
-
-

Loading search results...

-
- - {% include footer.html %} -
-
- - diff --git a/docs/assets/logo.png b/docs/assets/logo.png deleted file mode 100644 index 607611521..000000000 Binary files a/docs/assets/logo.png and /dev/null differ diff --git a/docs/css/api_ref.css b/docs/css/api_ref.css deleted file mode 100644 index 62da93510..000000000 --- a/docs/css/api_ref.css +++ /dev/null @@ -1,709 +0,0 @@ - + + + + + + + "Europe" + "Middle East" + "The Americas" + "Oceania" + "Asia" + "Africa" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <formatted-text> + <run fontsize='12'>Country ranks by GDP, GDP per Capita, Population, and Life Expectancy</run> + </formatted-text> + + + + + + + + + + + + + + Gross Domestic Product + in current US Dollars + + + + + + + Gross Domestic Product + per capita + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "[World Indicators new].[sum:F: GDP (curr $):qk]" + "[World Indicators new].[rank:sum:F: GDP (curr $):qk]" + "[World Indicators new].[sum:F: GDP per capita (curr $):qk]" + "[World Indicators new].[rank:sum:F: GDP per capita (curr $):qk]" + "[World Indicators new].[sum:P: Population (count):qk]" + "[World Indicators new].[rank:sum:P: Population (count):qk]" + "[World Indicators new].[avg:H: Life exp (years):qk]" + "[World Indicators new].[rank:avg:H: Life exp (years) (copy):qk]" + + + + + + + + + [World Indicators new].[:Measure Names] + [World Indicators new].[yr:Date:ok] + [World Indicators new].[none:F: GDP (curr $):qk] + + + + + + + + + + + + + + + + <[World Indicators new].[none:Country / Region:nk]> + Æ + <[World Indicators new].[:Measure Names]>: + <[World Indicators new].[Multiple Values]> + + + + + + [World Indicators new].[none:Country / Region:nk] + [World Indicators new].[:Measure Names] +
+
+ + + + <formatted-text> + <run fontsize='11'><</run> + <run fontsize='11'>[World Indicators new].[yr:Date:ok]</run> + <run fontsize='11'>></run> + <run fontsize='11'> GDP per capita by country</run> + </formatted-text> + + + + + + + + + + + + + + + + + + + Gross Domestic Product + per capita + + + + + + + + + + + + + + + + + + + + + + [World Indicators new].[yr:Date:ok] + [World Indicators new].[none:Region:nk] + + + + + + + + + + + + + + + + + + + Country: + <[World Indicators new].[none:Country / Region:nk]> + Region: + <[World Indicators new].[none:Region:nk]> + GDP per capita (curr $): + <[World Indicators new].[avg:F: GDP per capita (curr $):qk]> + % of world average: + <[World Indicators new].[usr:Calculation1:qk]> + + + + + + [World Indicators new].[none:Country / Region:nk] + [World Indicators new].[avg:F: GDP per capita (curr $):qk] +
+
+ + + + <formatted-text> + <run fontsize='12'>GDP per capita by region </run> + <run>Click on a point to filter the map to a specific year.</run> + </formatted-text> + + + + + + + + + + + + + + + + + Gross Domestic Product + in current US Dollars + + + + + + + + + + + + + + + + + + [World Indicators new].[Action (Country Name)] + [World Indicators new].[Action (Region)] + + + + + + + + + + + + + + + + + <[World Indicators new].[none:Region:nk]> + Year: + <[World Indicators new].[yr:Date:ok]> + Average GDP (curr $): + <[World Indicators new].[avg:F: GDP (curr $):qk]> + GDP per capita (weighted): + <[World Indicators new].[usr:Calculation_1590906174513693:qk]> + + + + + + [World Indicators new].[usr:Calculation_1590906174513693:qk] + [World Indicators new].[yr:Date:ok] +
+
+ + + + <formatted-text> + <run fontsize='12'>GDP per capita by country </run> + <run>Currently filtered to </run> + <run fontcolor='#4f6e8d'><[World Indicators new].[yr:Date:ok]></run> + </formatted-text> + + + + + + + + + + + + + + + + + + + + + + Gross Domestic Product + per capita + + + + + + + + + + + + + + + + + + + + + + + + + + + + 199.0 + 104512.0 + + + + + + + + "The Americas" + "Europe" + %null% + "Oceania" + "Africa" + "Middle East" + "Asia" + %all% + + + + [World Indicators new].[avg:F: GDP per capita (curr $):qk] + [World Indicators new].[none:Region:nk] + [World Indicators new].[Action (YEAR(Date (year)))] + + + + + + + + + + + + + + + + + + + + + <[World Indicators new].[none:Country / Region:nk]> + Æ + Region: + <[World Indicators new].[none:Region:nk]> + Subregion: + <[World Indicators new].[none:Subregion:nk]> + GDP per capita (curr $): + <[World Indicators new].[avg:F: GDP per capita (curr $):qk]> + GDP % of Subregion average: + <[World Indicators new].[usr:Calculation1:qk:5]> + GDP % of World average: + <[World Indicators new].[usr:Calculation1:qk:1]> + + + + + + [World Indicators new].[Latitude (generated)] + [World Indicators new].[Longitude (generated)] +
+
+ + + + <formatted-text> + <run fontsize='12'><Sheet Name>, <Page Name></run> + <run>Æ </run> + <run fontcolor='#898989' fontsize='10'>Click the forward button on year to watch the change over time Hover over mark to see the history of that country</run> + </formatted-text> + + + + + + + + + + + + + + + + + + + + + + + [World Indicators new].[avg:H: Health exp/cap (curr $):qk] + [World Indicators new].[avg:H: Life exp (years):qk] + + + + + + + + + + + + + + + + + <[World Indicators new].[none:Country / Region:nk]> + Æ + Region: + <[World Indicators new].[none:Region:nk]> + Year: + <[World Indicators new].[yr:Date:ok]> + Health exp/cap (curr $): + <[World Indicators new].[avg:H: Health exp/cap (curr $):qk]> + Life Expectancy: + <[World Indicators new].[avg:H: Life exp (years):qk]> + + + + + + [World Indicators new].[avg:H: Life exp (years):qk] + [World Indicators new].[avg:H: Health exp/cap (curr $):qk] + + [World Indicators new].[yr:Date:ok] + + +
+
+ + + + <formatted-text> + <run fontsize='12'>Lending and deposit interest rates, GDP per capita and % of world GDP sorted by GDP per Capita for region and subregion, </run> + <run fontsize='12'><</run> + <run fontsize='12'>[World Indicators new].[yr:Date:ok]</run> + <run fontsize='12'>></run> + </formatted-text> + + + + + + + + + + + + + + + + + + + + Gross Domestic Product + in current US Dollars + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "[World Indicators new].[avg:F: Lending interest rate (\%):qk]" + "[World Indicators new].[avg:F: Deposit interest rate (\%):qk]" + "[World Indicators new].[usr:Calculation_8570907072742130:qk]" + "[World Indicators new].[usr:Calculation_1590906174513693:qk]" + "[World Indicators new].[pcto:sum:F: GDP (curr $):qk]" + + + + + + + + + [World Indicators new].[:Measure Names] + [World Indicators new].[yr:Date:ok] + + + + + + + + + + + + + + + + + ([World Indicators new].[none:Region:nk] / [World Indicators new].[none:Subregion:nk]) + [World Indicators new].[:Measure Names] +
+
+ + + + <formatted-text> + <run><[World Indicators new].[yr:Date:ok]> Country <Sheet Name></run> + </formatted-text> + + + + + + + + + + + + + + Gross Domestic Product + in current US Dollars + + + + + + + + + + + + + + + + + + + + [World Indicators new].[yr:Date:ok] + [World Indicators new].[sum:F: GDP (curr $):qk] + + + + + + + + + + + + + + + + + + + + <[World Indicators new].[none:Country / Region:nk]> + Æ + Region: + <[World Indicators new].[none:Region:nk]> + % of World GDP: + <[World Indicators new].[pcto:sum:F: GDP (curr $):qk:1]> + GDP (US $'s): + <[World Indicators new].[sum:F: GDP (curr $):qk]> + + + + + <[World Indicators new].[none:Country / Region:nk]> + Æ + <[World Indicators new].[pcto:sum:F: GDP (curr $):qk:1]> <[World Indicators new].[sum:F: GDP (curr $):qk]> + + + + + + + +
+
+
+ + + + + <formatted-text> + <run fontalignment='0'>GDP per Capita</run> + </formatted-text> + + + + + + + + + + "Europe" + "Middle East" + "The Americas" + "Oceania" + "Asia" + "Africa" + + + + <_.fcp.ObjectModelEncapsulateLegacy.true...object-graph> + + + + + + + + + diff --git a/test/assets/World Indicators.tdsx b/test/assets/World Indicators.tdsx new file mode 100644 index 000000000..6e041442b Binary files /dev/null and b/test/assets/World Indicators.tdsx differ diff --git a/test/assets/custom_view_download.json b/test/assets/custom_view_download.json new file mode 100644 index 000000000..1ba2d74b7 --- /dev/null +++ b/test/assets/custom_view_download.json @@ -0,0 +1,47 @@ +[ + { + "isSourceView": true, + "viewName": "Overview", + "tcv": "<?xml version='1.0' encoding='utf-8' ?>

<customized-view dashboard='Overview' source-build='2024.2.0 (20242.24.0716.1944)' version='18.1' xmlns:user='http://www.tableausoftware.com/xml/user'>
  <active id='1' />
  <datasources>
    <datasource name='federated.10nnk8d1vgmw8q17yu76u06pnbcj'>
      <column datatype='string' name='[:Measure Names]' role='dimension' type='nominal'>
        <aliases>
          <alias key='&quot;[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[ctd:Customer Name:qk]&quot;' value='Count of Customers' />
        </aliases>
      </column>
      <group caption='Action (MONTH(Order Date),Product Category)' hidden='true' name='[Action (MONTH(Order Date),Product Category)]' name-style='unqualified' user:auto-column='sheet_link'>
        <groupfilter function='crossjoin'>
          <groupfilter function='level-members' level='[tmn:Order Date:ok]' />
          <groupfilter function='level-members' level='[none:Category:nk]' />
        </groupfilter>
      </group>
      <column caption='Action (MONTH(Order Date),Product Category)' datatype='tuple' hidden='true' name='[Action (MONTH(Order Date),Product Category)]' role='dimension' type='nominal' user:auto-column='sheet_link' />
      <group caption='Action (MONTH(Order Date),Segment)' hidden='true' name='[Action (MONTH(Order Date),Segment)]' name-style='unqualified' user:auto-column='sheet_link'>
        <groupfilter function='crossjoin'>
          <groupfilter function='level-members' level='[tmn:Order Date:ok]' />
          <groupfilter function='level-members' level='[Segment]' />
        </groupfilter>
      </group>
      <column caption='Action (MONTH(Order Date),Segment)' datatype='tuple' hidden='true' name='[Action (MONTH(Order Date),Segment)]' role='dimension' type='nominal' user:auto-column='sheet_link' />
      <group caption='Action (Order Profitable?,Category,MONTH(Order Date))' hidden='true' name='[Action (Order Profitable?,Category,MONTH(Order Date))]' name-style='unqualified' user:auto-column='sheet_link'>
        <groupfilter function='crossjoin'>
          <groupfilter function='level-members' level='[Calculation_9060122104947471]' />
          <groupfilter function='level-members' level='[Category]' />
          <groupfilter function='level-members' level='[tmn:Order Date:ok]' />
        </groupfilter>
      </group>
      <column caption='Action (Order Profitable?,Category,MONTH(Order Date))' datatype='tuple' hidden='true' name='[Action (Order Profitable?,Category,MONTH(Order Date))]' role='dimension' type='nominal' user:auto-column='sheet_link' />
      <group caption='Action (Order Profitable?,MONTH(Order Date),Segment)' hidden='true' name='[Action (Order Profitable?,MONTH(Order Date),Segment)]' name-style='unqualified' user:auto-column='sheet_link'>
        <groupfilter function='crossjoin'>
          <groupfilter function='level-members' level='[Calculation_9060122104947471]' />
          <groupfilter function='level-members' level='[tmn:Order Date:ok]' />
          <groupfilter function='level-members' level='[Segment]' />
        </groupfilter>
      </group>
      <column caption='Action (Order Profitable?,MONTH(Order Date),Segment)' datatype='tuple' hidden='true' name='[Action (Order Profitable?,MONTH(Order Date),Segment)]' role='dimension' type='nominal' user:auto-column='sheet_link' />
      <group caption='Action (Postal Code,State/Province)' hidden='true' name='[Action (Postal Code,State/Province)]' name-style='unqualified' user:auto-column='sheet_link'>
        <groupfilter function='crossjoin'>
          <groupfilter function='level-members' level='[none:Postal Code:nk]' />
          <groupfilter function='level-members' level='[State/Province]' />
        </groupfilter>
      </group>
      <column caption='Action (Postal Code,State/Province)' datatype='tuple' hidden='true' name='[Action (Postal Code,State/Province)]' role='dimension' type='nominal' user:auto-column='sheet_link' />
      <group caption='Action (State/Province)' hidden='true' name='[Action (State/Province)]' name-style='unqualified' user:auto-column='sheet_link'>
        <groupfilter function='crossjoin'>
          <groupfilter function='level-members' level='[State/Province]' />
        </groupfilter>
      </group>
      <column caption='Action (State/Province)' datatype='tuple' hidden='true' name='[Action (State/Province)]' role='dimension' type='nominal' user:auto-column='sheet_link' />
      <column-instance column='[Calculation_9060122104947471]' derivation='None' name='[none:Calculation_9060122104947471:nk]' pivot='key' type='nominal' />
      <column-instance column='[Order Date]' derivation='None' name='[none:Order Date:qk]' pivot='key' type='quantitative' />
      <column-instance column='[Region]' derivation='None' name='[none:Region:nk]' pivot='key' type='nominal' />
      <column-instance column='[Calculation_9921103144103743]' derivation='User' name='[usr:Calculation_9921103144103743:qk]' pivot='key' type='quantitative' />
    </datasource>
  </datasources>
  <worksheet name='Total Sales'>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Action (State/Province)]'>
      <groupfilter function='member' level='[State/Province]' member='&quot;Texas&quot;' user:ui-action-filter='[Action1]' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[none:Region:nk]' filter-group='14'>
      <groupfilter function='member' level='[none:Region:nk]' member='&quot;Central&quot;' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <table />
  </worksheet>
  <worksheet name='Sale Map'>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[none:Region:nk]' filter-group='14'>
      <groupfilter function='member' level='[none:Region:nk]' member='&quot;Central&quot;' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <table />
  </worksheet>
  <worksheet name='Sales by Segment'>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Action (State/Province)]'>
      <groupfilter function='member' level='[State/Province]' member='&quot;Texas&quot;' user:ui-action-filter='[Action1]' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[none:Region:nk]' filter-group='14'>
      <groupfilter function='member' level='[none:Region:nk]' member='&quot;Central&quot;' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <table />
  </worksheet>
  <worksheet name='Sales by Product'>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Action (State/Province)]'>
      <groupfilter function='member' level='[State/Province]' member='&quot;Texas&quot;' user:ui-action-filter='[Action1]' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <filter class='categorical' column='[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[none:Region:nk]' filter-group='14'>
      <groupfilter function='member' level='[none:Region:nk]' member='&quot;Central&quot;' user:ui-domain='database' user:ui-enumeration='inclusive' user:ui-marker='enumerate' />
    </filter>
    <table />
  </worksheet>
  <windows>
    <window class='worksheet' name='Sale Map'>
      <selection-collection>
        <tuple-selection>
          <tuple-reference>
            <tuple-descriptor>
              <pane-descriptor>
                <x-fields>
                  <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Longitude (generated)]</field>
                </x-fields>
                <y-fields>
                  <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Latitude (generated)]</field>
                </y-fields>
              </pane-descriptor>
              <columns>
                <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[none:Country/Region:nk]</field>
                <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[none:State/Province:nk]</field>
                <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Geometry (generated)]</field>
                <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Latitude (generated)]</field>
                <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[Longitude (generated)]</field>
                <field>[federated.10nnk8d1vgmw8q17yu76u06pnbcj].[usr:Calculation_9921103144103743:qk]</field>
              </columns>
            </tuple-descriptor>
            <tuple>
              <value>&quot;United States&quot;</value>
              <value>&quot;Texas&quot;</value>
              <value>&quot;MULTIPOLYGON(((-97.1463 25.9556,-97.208 25.9636,-97.2772 25.9354,-97.3489 25.9308,-97.3744 25.9074,-97.3576 25.8869,-97.3737 25.84,-97.4539 25.8544,-97.4564 25.8838,-97.5218 25.8865,-97.5482 25.9355,-97.5826 25.9379,-97.6449 26.0275,-97.7067 26.0374,-97.7641 26.0286,-97.8013 26.06,-97.8355 26.0469,-97.8619 26.0698,-97.9099 26.0569,-97.9661 26.0519,-98.0308 26.065,-98.0701 26.0379,-98.0791 26.0705,-98.1355 26.072,-98.1575 26.0544,-98.197 26.0562,-98.3065 26.1043,-98.3352 26.1376,-98.3867 26.1579,-98.4443 26.2012,-98.4452 26.2246,-98.5061 26.209,-98.5224 26.2209,-98.5615 26.2245,-98.5867 26.2575,-98.6542 26.236,-98.6794 26.2492,-98.7538 26.3317,-98.7898 26.3316,-98.8269 26.3696,-98.8962 26.3532,-98.9292 26.3932,-98.9465 26.3699,-98.9742 26.4011,-99.0106 26.3921,-99.04 26.4129,-99.0948 26.4109,-99.1109 26.4263,-99.0916 26.4764,-99.1284 26.5255,-99.1667 26.5361,-99.1694 26.5717,-99.2002 26.6558,-99.2089 26.7248,-99.24 26.7459,-99.2424 26.7883,-99.2686 26.8432,-99.3289 26.8802,-99.3218 26.9068,-99.3883 26.9442,-99.3773 26.9738,-99.4155 27.0172,-99.4465 27.023,-99.451 27.0668,-99.4303 27.0949,-99.4396 27.1521,-99.4264 27.1783,-99.4538 27.2651,-99.4966 27.2717,-99.495 27.3039,-99.5379 27.3175,-99.5044 27.3399,-99.4804 27.4816,-99.5283 27.4989,-99.5111 27.5645,-99.5568 27.6143,-99.58 27.6023,-99.594 27.6386,-99.6389 27.6268,-99.6913 27.6687,-99.7284 27.6793,-99.7707 27.7321,-99.8331 27.7629,-99.8723 27.7953,-99.8813 27.8496,-99.9015 27.8642,-99.9001 27.9121,-99.9371 27.9405,-99.9318 27.981,-99.9898 27.9929,-100.019 28.0664,-100.0561 28.0913,-100.0869 28.1468,-100.1592 28.1676,-100.2122 28.1968,-100.2236 28.2352,-100.2578 28.2403,-100.2935 28.2785,-100.2886 28.317,-100.3493 28.4014,-100.3362 28.4302,-100.3682 28.4789,-100.3347 28.5003,-100.387 28.514,-100.4104 28.5543,-100.3985 28.5852,-100.4476 28.6101,-100.4457 28.6406,-100.5004 28.662,-100.5076 28.7406,-100.5336 28.7611,-100.5466 28.8249,-100.5705 28.8263,-100.5915 28.8893,-100.6488 28.941,-100.6459 28.9864,-100.6675 29.0843,-100.7759 29.1733,-100.7659 29.1875,-100.7948 29.2416,-100.8761 29.2796,-100.8868 29.3078,-100.9507 29.3477,-101.0066 29.366,-101.0602 29.4587,-101.1519 29.477,-101.1738 29.5146,-101.2612 29.5368,-101.241 29.565,-101.2622 29.6306,-101.291 29.5715,-101.3116 29.5851,-101.3 29.6407,-101.3141 29.6591,-101.3632 29.6526,-101.3754 29.7018,-101.4156 29.7465,-101.4489 29.7507,-101.4558 29.788,-101.5392 29.7618,-101.5419 29.8108,-101.5758 29.7693,-101.7106 29.7617,-101.7609 29.7821,-101.8062 29.7808,-101.8534 29.8079,-101.9335 29.7851,-102.0383 29.8031,-102.049 29.7856,-102.1161 29.7925,-102.1949 29.8371,-102.3207 29.8789,-102.3648 29.8443,-102.3897 29.7819,-102.5174 29.7838,-102.548 29.745,-102.5724 29.7561,-102.623 29.7364,-102.6749 29.7443,-102.6934 29.6772,-102.7422 29.6307,-102.745 29.5932,-102.7683 29.5947,-102.7714 29.5489,-102.8084 29.5229,-102.831 29.4443,-102.8247 29.3973,-102.8399 29.3606,-102.8786 29.3539,-102.9032 29.254,-102.8706 29.2369,-102.8901 29.2088,-102.9502 29.1736,-102.9738 29.1855,-103.0325 29.1047,-103.0753 29.0923,-103.1007 29.0602,-103.1153 28.9853,-103.1533 28.9718,-103.2274 28.9915,-103.2792 28.9777,-103.2986 29.0068,-103.4337 29.045,-103.4506 29.0728,-103.5545 29.1585,-103.7192 29.1814,-103.7927 29.2623,-103.8147 29.2738,-103.9696 29.2978,-104.0199 29.3121,-104.1065 29.3731,-104.163 29.3919,-104.2175 29.4559,-104.209 29.481,-104.2642 29.514,-104.3381 29.52,-104.4006 29.573,-104.4669 29.6096,-104.5442 29.6816,-104.5661 29.7714,-104.6295 29.8523,-104.6825 29.9348,-104.674 
29.9567,-104.7063 30.0497,-104.6879 30.0739,-104.6966 30.1344,-104.6872 30.179,-104.7068 30.2354,-104.7632 30.2744,-104.7735 30.3027,-104.8226 30.3503,-104.8163 30.3743,-104.8595 30.3911,-104.8694 30.4773,-104.8824 30.5323,-104.919 30.5977,-104.9721 30.6103,-105.0065 30.6858,-105.0625 30.6866,-105.1181 30.7495,-105.1617 30.7521,-105.2177 30.806,-105.2561 30.7945,-105.2917 30.8261,-105.3615 30.8503,-105.3956 30.849,-105.4135 30.8998,-105.4988 30.9503,-105.5786 31.0206,-105.5851 31.0569,-105.6467 31.1139,-105.7739 31.168,-105.8188 31.2307,-105.8747 31.2913,-105.9312 31.3127,-105.9539 31.3647,-106.0162 31.3935,-106.0753 31.3976,-106.1911 31.4599,-106.2196 31.4816,-106.2452 31.5391,-106.2801 31.5615,-106.3079 31.6295,-106.3811 31.7321,-106.4514 31.7644,-106.4905 31.7489,-106.5282 31.7831,-106.5471 31.8073,-106.6053 31.8277,-106.6455 31.8987,-106.6118 31.92,-106.6185 32.0005,-105.998 32.0023,-105.2505 32.0003,-104.8478 32.0005,-104.0245 32,-103.0644 32.0005,-103.0647 32.9591,-103.0567 33.3884,-103.044 33.9746,-103.0424 35.1831,-103.0408 36.0552,-103.0419 36.5004,-103.0024 36.5004,-102.0323 36.5006,-101.6239 36.4995,-101.0852 36.4992,-100.0004 36.4997,-100.0004 34.7465,-99.9975 34.5606,-99.9232 34.5746,-99.8446 34.5069,-99.7534 34.4209,-99.6945 34.3782,-99.6 34.3747,-99.5798 34.4169,-99.5176 34.4145,-99.4335 34.3702,-99.3987 34.3758,-99.3952 34.442,-99.3756 34.4588,-99.3201 34.4093,-99.2613 34.4035,-99.2108 34.3368,-99.1898 34.2144,-99.0953 34.2118,-99.0434 34.1982,-98.9917 34.2214,-98.9524 34.2125,-98.8601 34.1499,-98.8311 34.1622,-98.7667 34.1368,-98.6901 34.1332,-98.6481 34.1644,-98.6102 34.1571,-98.5602 34.1332,-98.487 34.0629,-98.4235 34.0828,-98.3984 34.1285,-98.364 34.1571,-98.3002 34.1346,-98.2325 34.1346,-98.1688 34.1143,-98.1391 34.1419,-98.1019 34.1468,-98.0905 34.1225,-98.1202 34.0721,-98.0838 34.0417,-98.0844 34.0029,-98.0163 33.9941,-97.9742 34.0067,-97.9468 33.9909,-97.9712 33.9372,-97.9572 33.9145,-97.9779 33.8899,-97.8714 33.849,-97.8343 33.8577,-97.763 33.9341,-97.7323 33.9367,-97.6877 33.9872,-97.6615 33.9908,-97.5888 33.9519,-97.5893 33.9039,-97.5609 33.8996,-97.4842 33.9154,-97.4511 33.8917,-97.4629 33.8429,-97.4439 33.8237,-97.3729 33.8195,-97.3319 33.8845,-97.2556 33.8637,-97.2462 33.9003,-97.2103 33.9159,-97.1855 33.9007,-97.1668 33.8404,-97.1974 33.8298,-97.1934 33.7606,-97.1513 33.7226,-97.1111 33.7194,-97.0887 33.7387,-97.088 33.8087,-97.048 33.8179,-97.0873 33.8398,-97.0573 33.8569,-97.0235 33.8445,-96.9856 33.8865,-96.9963 33.9427,-96.9348 33.9545,-96.8994 33.9337,-96.883 33.868,-96.8506 33.8472,-96.8322 33.8748,-96.7796 33.8579,-96.7694 33.8275,-96.7137 33.8313,-96.6907 33.85,-96.6734 33.9123,-96.5885 33.895,-96.629 33.8524,-96.5732 33.8192,-96.5329 33.823,-96.5007 33.7726,-96.4226 33.776,-96.3795 33.7258,-96.3622 33.6918,-96.3184 33.6971,-96.303 33.7509,-96.2773 33.7697,-96.2304 33.7485,-96.1781 33.7605,-96.1492 33.8371,-96.1015 33.8467,-96.0488 33.8365,-95.9419 33.861,-95.9321 33.8865,-95.8433 33.8383,-95.8045 33.8622,-95.7679 33.8468,-95.7566 33.892,-95.6949 33.8868,-95.6686 33.907,-95.6273 33.9078,-95.5975 33.9423,-95.5577 33.9304,-95.5434 33.8805,-95.4598 33.888,-95.4382 33.8671,-95.3105 33.8772,-95.2822 33.8759,-95.2714 33.9126,-95.2194 33.9616,-95.1559 33.9368,-95.1296 33.9367,-95.1176 33.9046,-95.0824 33.8799,-95.0601 33.9019,-95.049 33.8641,-94.9689 33.8609,-94.9535 33.8165,-94.9233 33.8087,-94.9115 33.7784,-94.8493 33.7396,-94.8234 33.7692,-94.8023 33.7328,-94.7713 33.7607,-94.7461 33.703,-94.6848 33.6844,-94.6679 33.6946,-94.6392 33.6637,-94.6214 
33.6826,-94.5908 33.6456,-94.5464 33.66,-94.5204 33.6175,-94.4859 33.6379,-94.3895 33.5467,-94.3536 33.544,-94.3455 33.5673,-94.3096 33.5517,-94.2759 33.558,-94.2192 33.5561,-94.1843 33.5946,-94.1474 33.5652,-94.0824 33.5757,-94.0434 33.5523,-94.043 33.0192,-94.0427 31.9993,-94.0156 31.9799,-93.9708 31.92,-93.9299 31.9127,-93.8967 31.8853,-93.8748 31.8223,-93.8226 31.7736,-93.8369 31.7502,-93.7945 31.7021,-93.8217 31.674,-93.8187 31.6146,-93.8349 31.5862,-93.785 31.526,-93.7125 31.5134,-93.7495 31.4687,-93.6926 31.4372,-93.7049 31.4109,-93.6741 31.3977,-93.6691 31.3654,-93.6875 31.3108,-93.5984 31.2311,-93.6003 31.1762,-93.5526 31.1856,-93.5394 31.1152,-93.5632 31.097,-93.5276 31.0745,-93.5089 31.0293,-93.5563 31.0041,-93.5684 30.9691,-93.5321 30.9579,-93.5263 30.9297,-93.5586 30.9132,-93.5536 30.8351,-93.6148 30.756,-93.6077 30.7156,-93.6315 30.678,-93.6831 30.6408,-93.6788 30.5986,-93.7275 30.5747,-93.7338 30.5317,-93.6978 30.4438,-93.7417 30.4023,-93.7623 30.3537,-93.7421 30.301,-93.7047 30.2899,-93.707 30.2437,-93.721 30.2104,-93.6928 30.1352,-93.7328 30.0829,-93.7225 30.0509,-93.7551 30.0153,-93.8717 29.981,-93.8692 29.938,-93.9506 29.8493,-93.9466 29.7801,-93.8377 29.679,-94.0143 29.6798,-94.3543 29.561,-94.4991 29.5068,-94.4702 29.5571,-94.5459 29.5725,-94.7625 29.5241,-94.7039 29.6325,-94.6957 29.7565,-94.7389 29.7906,-94.8141 29.759,-94.8728 29.6714,-94.9303 29.6737,-95.0166 29.7205,-95.0726 29.8262,-95.0955 29.7576,-94.9833 29.6823,-94.9985 29.6164,-95.0789 29.5353,-95.017 29.548,-94.9096 29.4961,-94.9504 29.4667,-94.8854 29.3897,-95.0574 29.2013,-95.1496 29.1805,-95.2342 28.9926,-95.3856 28.8646,-95.5072 28.8254,-95.6537 28.7499,-95.6727 28.7495,-95.784 28.6794,-95.9149 28.6388,-95.6776 28.7494,-95.7853 28.7471,-95.9236 28.7015,-95.9608 28.6152,-96.3355 28.4381,-96.1463 28.5427,-95.9906 28.6016,-96.0388 28.6528,-96.1524 28.6135,-96.2354 28.6427,-96.2078 28.6981,-96.3229 28.6419,-96.386 28.6748,-96.4284 28.7071,-96.4348 28.603,-96.5615 28.6454,-96.5736 28.7055,-96.6596 28.7226,-96.6614 28.7026,-96.6121 28.6394,-96.6385 28.5719,-96.5667 28.5825,-96.4153 28.4637,-96.4322 28.4325,-96.6503 28.3325,-96.7084 28.4075,-96.7857 28.4476,-96.7832 28.4004,-96.8589 28.4176,-96.7905 28.3192,-96.8095 28.2199,-96.9111 28.1357,-96.9868 28.1287,-97.0373 28.2013,-97.2415 28.0623,-97.15 28.0338,-97.1354 28.0472,-97.0246 28.1133,-97.031 28.0486,-97.1338 27.9009,-97.1569 27.8728,-97.2134 27.821,-97.2501 27.8764,-97.3548 27.8502,-97.3312 27.8738,-97.5281 27.8474,-97.3829 27.8387,-97.3617 27.7351,-97.245 27.6931,-97.3248 27.561,-97.4123 27.3224,-97.5011 27.2915,-97.4737 27.4029,-97.5339 27.3398,-97.6374 27.301,-97.7352 27.4182,-97.6619 27.2875,-97.7966 27.2726,-97.6574 27.2737,-97.5341 27.2253,-97.4487 27.2631,-97.4511 27.1216,-97.5052 27.0856,-97.479 26.9991,-97.5614 26.998,-97.5629 26.8389,-97.471 26.7501,-97.4464 26.5999,-97.4177 26.3702,-97.3406 26.3318,-97.2955 26.1908,-97.3121 26.1216,-97.2365 26.0646,-97.2516 25.9643,-97.1527 26.0275,-97.1463 25.9556)),((-94.5117 29.5158,-94.6592 29.4375,-94.7282 29.3716,-94.7774 29.3759,-94.6852 29.4513,-94.5117 29.5158)),((-94.7518 29.3329,-94.8049 29.2787,-95.0562 29.1299,-94.8613 29.2953,-94.7518 29.3329)),((-96.8201 28.1645,-96.7037 28.198,-96.3875 28.3762,-96.4403 28.3188,-96.6878 28.1859,-96.8479 28.0651,-96.8201 28.1645)),((-96.8722 28.1315,-96.85 28.0638,-97.0554 27.8472,-96.9632 28.0229,-96.8722 28.1315)),((-97.2943 26.6003,-97.3254 26.6003,-97.3094 26.6298,-97.3921 26.9367,-97.3916 27.1258,-97.3661 27.2781,-97.3712 27.2781,-97.3302 27.4352,-97.2472 
27.5815,-97.1964 27.6837,-97.0925 27.8114,-97.0446 27.8344,-97.1504 27.7027,-97.2227 27.5765,-97.3472 27.278,-97.3793 27.0402,-97.3705 26.9081,-97.2901 26.6003,-97.2943 26.6003)))&quot;</value>
              <value>31.25</value>
              <value>-99.25</value>
              <value>-0.15118192455324594</value>
            </tuple>
          </tuple-reference>
        </tuple-selection>
      </selection-collection>
    </window>
  </windows>
</customized-view>
" + }, + { + "isSourceView": false, + "viewName": "Product", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IGRhc2hib2FyZD0nUHJvZHVjdCcgc291cmNlLWJ1aWxkPScyMDI0LjIuMCAoMjAyNDIuMjQuMDcxNi4xOTQ0KScgdmVyc2lvbj0nMTguMScgeG1sbnM6dXNlcj0naHR0cDovL3d3dy50YWJsZWF1c29mdHdhcmUuY29tL3htbC91c2VyJz4KICA8YWN0aXZlIGlkPSctMScgLz4KICA8ZGF0YXNvdXJjZXM-CiAgICA8ZGF0YXNvdXJjZSBuYW1lPSdmZWRlcmF0ZWQuMTBubms4ZDF2Z213OHExN3l1NzZ1MDZwbmJjaic-CiAgICAgIDxncm91cCBjYXB0aW9uPSdBY3Rpb24gKENhdGVnb3J5LFlFQVIoT3JkZXIgRGF0ZSksTU9OVEgoT3JkZXIgRGF0ZSkpJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKENhdGVnb3J5LFlFQVIoT3JkZXIgRGF0ZSksTU9OVEgoT3JkZXIgRGF0ZSkpXScgbmFtZS1zdHlsZT0ndW5xdWFsaWZpZWQnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnPgogICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nY3Jvc3Nqb2luJz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1tDYXRlZ29yeV0nIC8-CiAgICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2xldmVsLW1lbWJlcnMnIGxldmVsPSdbeXI6T3JkZXIgRGF0ZTpva10nIC8-CiAgICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2xldmVsLW1lbWJlcnMnIGxldmVsPSdbbW46T3JkZXIgRGF0ZTpva10nIC8-CiAgICAgICAgPC9ncm91cGZpbHRlcj4KICAgICAgPC9ncm91cD4KICAgICAgPGNvbHVtbiBjYXB0aW9uPSdBY3Rpb24gKENhdGVnb3J5LFlFQVIoT3JkZXIgRGF0ZSksTU9OVEgoT3JkZXIgRGF0ZSkpJyBkYXRhdHlwZT0ndHVwbGUnIGhpZGRlbj0ndHJ1ZScgbmFtZT0nW0FjdGlvbiAoQ2F0ZWdvcnksWUVBUihPcmRlciBEYXRlKSxNT05USChPcmRlciBEYXRlKSldJyByb2xlPSdkaW1lbnNpb24nIHR5cGU9J25vbWluYWwnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnIC8-CiAgICAgIDxncm91cCBjYXB0aW9uPSdBY3Rpb24gKFlFQVIoT3JkZXIgRGF0ZSksTU9OVEgoT3JkZXIgRGF0ZSkpJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKFlFQVIoT3JkZXIgRGF0ZSksTU9OVEgoT3JkZXIgRGF0ZSkpXScgbmFtZS1zdHlsZT0ndW5xdWFsaWZpZWQnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnPgogICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nY3Jvc3Nqb2luJz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1t5cjpPcmRlciBEYXRlOm9rXScgLz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1ttbjpPcmRlciBEYXRlOm9rXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoWUVBUihPcmRlciBEYXRlKSxNT05USChPcmRlciBEYXRlKSknIGRhdGF0eXBlPSd0dXBsZScgaGlkZGVuPSd0cnVlJyBuYW1lPSdbQWN0aW9uIChZRUFSKE9yZGVyIERhdGUpLE1PTlRIKE9yZGVyIERhdGUpKV0nIHJvbGU9J2RpbWVuc2lvbicgdHlwZT0nbm9taW5hbCcgdXNlcjphdXRvLWNvbHVtbj0nc2hlZXRfbGluaycgLz4KICAgICAgPGdyb3VwIGNhcHRpb249J0FjdGlvbiAoWUVBUihPcmRlciBEYXRlKSxNT05USChPcmRlciBEYXRlKSxQcm9kdWN0IENhdGVnb3J5KScgaGlkZGVuPSd0cnVlJyBuYW1lPSdbQWN0aW9uIChZRUFSKE9yZGVyIERhdGUpLE1PTlRIKE9yZGVyIERhdGUpLFByb2R1Y3QgQ2F0ZWdvcnkpXScgbmFtZS1zdHlsZT0ndW5xdWFsaWZpZWQnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnPgogICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nY3Jvc3Nqb2luJz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1t5cjpPcmRlciBEYXRlOm9rXScgLz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1ttbjpPcmRlciBEYXRlOm9rXScgLz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1tub25lOkNhdGVnb3J5Om5rXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoWUVBUihPcmRlciBEYXRlKSxNT05USChPcmRlciBEYXRlKSxQcm9kdWN0IENhdGVnb3J5KScgZGF0YXR5cGU9J3R1cGxlJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKFlFQVIoT3JkZXIgRGF0ZSksTU9OVEgoT3JkZXIgRGF0ZSksUHJvZHVjdCBDYXRlZ29yeSldJyByb2xlPSdkaW1lbnNpb24nIHR5cGU9J25vbWluYWwnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbT3JkZXIgRGF0ZV0nIGRlcml2YXRpb249J01vbnRoJyBuYW1lPSdbbW46T3JkZXIgRGF0ZTpva10nIHBpdm90PSdrZXknIHR5cGU9J29yZGluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5z
dGFuY2UgY29sdW1uPSdbQ2F0ZWdvcnldJyBkZXJpdmF0aW9uPSdOb25lJyBuYW1lPSdbbm9uZTpDYXRlZ29yeTpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbT3JkZXIgRGF0ZV0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOk9yZGVyIERhdGU6cWtdJyBwaXZvdD0na2V5JyB0eXBlPSdxdWFudGl0YXRpdmUnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbUmVnaW9uXScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6UmVnaW9uOm5rXScgcGl2b3Q9J2tleScgdHlwZT0nbm9taW5hbCcgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tPcmRlciBEYXRlXScgZGVyaXZhdGlvbj0nWWVhcicgbmFtZT0nW3lyOk9yZGVyIERhdGU6b2tdJyBwaXZvdD0na2V5JyB0eXBlPSdvcmRpbmFsJyAvPgogICAgPC9kYXRhc291cmNlPgogIDwvZGF0YXNvdXJjZXM-CiAgPHdvcmtzaGVldCBuYW1lPSdQcm9kdWN0Vmlldyc-CiAgICA8dGFibGUgLz4KICA8L3dvcmtzaGVldD4KICA8d29ya3NoZWV0IG5hbWU9J1Byb2R1Y3REZXRhaWxzJz4KICAgIDx0YWJsZSAvPgogIDwvd29ya3NoZWV0Pgo8L2N1c3RvbWl6ZWQtdmlldz4K" + }, + { + "isSourceView": false, + "viewName": "Customers", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IGRhc2hib2FyZD0nQ3VzdG9tZXJzJyBzb3VyY2UtYnVpbGQ9JzIwMjQuMi4wICgyMDI0Mi4yNC4wNzE2LjE5NDQpJyB2ZXJzaW9uPScxOC4xJyB4bWxuczp1c2VyPSdodHRwOi8vd3d3LnRhYmxlYXVzb2Z0d2FyZS5jb20veG1sL3VzZXInPgogIDxhY3RpdmUgaWQ9Jy0xJyAvPgogIDxkYXRhc291cmNlcz4KICAgIDxkYXRhc291cmNlIG5hbWU9J2ZlZGVyYXRlZC4xMG5uazhkMXZnbXc4cTE3eXU3NnUwNnBuYmNqJz4KICAgICAgPGNvbHVtbiBkYXRhdHlwZT0nc3RyaW5nJyBuYW1lPSdbOk1lYXN1cmUgTmFtZXNdJyByb2xlPSdkaW1lbnNpb24nIHR5cGU9J25vbWluYWwnPgogICAgICAgIDxhbGlhc2VzPgogICAgICAgICAgPGFsaWFzIGtleT0nJnF1b3Q7W2ZlZGVyYXRlZC4xMG5uazhkMXZnbXc4cTE3eXU3NnUwNnBuYmNqXS5bY3RkOkN1c3RvbWVyIE5hbWU6cWtdJnF1b3Q7JyB2YWx1ZT0nQ291bnQgb2YgQ3VzdG9tZXJzJyAvPgogICAgICAgIDwvYWxpYXNlcz4KICAgICAgPC9jb2x1bW4-CiAgICAgIDxncm91cCBjYXB0aW9uPSdBY3Rpb24gKFJlZ2lvbiknIGhpZGRlbj0ndHJ1ZScgbmFtZT0nW0FjdGlvbiAoUmVnaW9uKV0nIG5hbWUtc3R5bGU9J3VucXVhbGlmaWVkJyB1c2VyOmF1dG8tY29sdW1uPSdzaGVldF9saW5rJz4KICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2Nyb3Nzam9pbic-CiAgICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2xldmVsLW1lbWJlcnMnIGxldmVsPSdbUmVnaW9uXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoUmVnaW9uKScgZGF0YXR5cGU9J3R1cGxlJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKFJlZ2lvbildJyByb2xlPSdkaW1lbnNpb24nIHR5cGU9J25vbWluYWwnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbQ2F0ZWdvcnldJyBkZXJpdmF0aW9uPSdOb25lJyBuYW1lPSdbbm9uZTpDYXRlZ29yeTpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbT3JkZXIgRGF0ZV0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOk9yZGVyIERhdGU6cWtdJyBwaXZvdD0na2V5JyB0eXBlPSdxdWFudGl0YXRpdmUnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbUmVnaW9uXScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6UmVnaW9uOm5rXScgcGl2b3Q9J2tleScgdHlwZT0nbm9taW5hbCcgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tTZWdtZW50XScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6U2VnbWVudDpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbT3JkZXIgRGF0ZV0nIGRlcml2YXRpb249J1F1YXJ0ZXInIG5hbWU9J1txcjpPcmRlciBEYXRlOm9rXScgcGl2b3Q9J2tleScgdHlwZT0nb3JkaW5hbCcgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tPcmRlciBEYXRlXScgZGVyaXZhdGlvbj0nWWVhcicgbmFtZT0nW3lyOk9yZGVyIERhdGU6b2tdJyBwaXZvdD0na2V5JyB0eXBlPSdvcmRpbmFsJyAvPgogICAgPC9kYXRhc291cmNlPgogIDwvZGF0YXNvdXJjZXM-CiAgPHdvcmtzaGVldCBuYW1lPSdDdXN0b21lclNjYXR0ZXInPgogICAgPHRhYmxlIC8-CiAgPC93b3Jrc2hlZXQ-CiAgPHdvcmtzaGVldCBuYW1lPSdDdXN0b21lclJhbmsnPgogICAgPHRhYmxlIC8-CiAgPC93b3Jrc2hlZXQ-CiAgPHdvcmtzaGVldCBuYW1lPSdDdXN0b21lck92ZXJ2aWV3Jz4KICAgIDx0YWJsZSAvPgogIDwvd29ya3NoZWV0Pgo8L2N1c3RvbWl6ZWQtdmlldz4K" 
+ }, + { + "isSourceView": false, + "viewName": "Shipping", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IGRhc2hib2FyZD0nU2hpcHBpbmcnIHNvdXJjZS1idWlsZD0nMjAyNC4yLjAgKDIwMjQyLjI0LjA3MTYuMTk0NCknIHZlcnNpb249JzE4LjEnIHhtbG5zOnVzZXI9J2h0dHA6Ly93d3cudGFibGVhdXNvZnR3YXJlLmNvbS94bWwvdXNlcic-CiAgPGFjdGl2ZSBpZD0nLTEnIC8-CiAgPGRhdGFzb3VyY2VzPgogICAgPGRhdGFzb3VyY2UgbmFtZT0nZmVkZXJhdGVkLjEwbm5rOGQxdmdtdzhxMTd5dTc2dTA2cG5iY2onPgogICAgICA8Z3JvdXAgY2FwdGlvbj0nQWN0aW9uIChEZWxheWVkPyknIGhpZGRlbj0ndHJ1ZScgbmFtZT0nW0FjdGlvbiAoRGVsYXllZD8pXScgbmFtZS1zdHlsZT0ndW5xdWFsaWZpZWQnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnPgogICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nY3Jvc3Nqb2luJz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1tDYWxjdWxhdGlvbl82NDAxMTAzMTcxMjU5NzIzXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoRGVsYXllZD8pJyBkYXRhdHlwZT0ndHVwbGUnIGhpZGRlbj0ndHJ1ZScgbmFtZT0nW0FjdGlvbiAoRGVsYXllZD8pXScgcm9sZT0nZGltZW5zaW9uJyB0eXBlPSdub21pbmFsJyB1c2VyOmF1dG8tY29sdW1uPSdzaGVldF9saW5rJyAvPgogICAgICA8Z3JvdXAgY2FwdGlvbj0nQWN0aW9uIChTaGlwIFN0YXR1cyknIGhpZGRlbj0ndHJ1ZScgbmFtZT0nW0FjdGlvbiAoU2hpcCBTdGF0dXMpXScgbmFtZS1zdHlsZT0ndW5xdWFsaWZpZWQnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnPgogICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nY3Jvc3Nqb2luJz4KICAgICAgICAgIDxncm91cGZpbHRlciBmdW5jdGlvbj0nbGV2ZWwtbWVtYmVycycgbGV2ZWw9J1tDYWxjdWxhdGlvbl82NDAxMTAzMTcxMjU5NzIzXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoU2hpcCBTdGF0dXMpJyBkYXRhdHlwZT0ndHVwbGUnIGhpZGRlbj0ndHJ1ZScgbmFtZT0nW0FjdGlvbiAoU2hpcCBTdGF0dXMpXScgcm9sZT0nZGltZW5zaW9uJyB0eXBlPSdub21pbmFsJyB1c2VyOmF1dG8tY29sdW1uPSdzaGVldF9saW5rJyAvPgogICAgICA8Z3JvdXAgY2FwdGlvbj0nQWN0aW9uIChTaGlwIFN0YXR1cyxZRUFSKE9yZGVyIERhdGUpLFdFRUsoT3JkZXIgRGF0ZSkpJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKFNoaXAgU3RhdHVzLFlFQVIoT3JkZXIgRGF0ZSksV0VFSyhPcmRlciBEYXRlKSldJyBuYW1lLXN0eWxlPSd1bnF1YWxpZmllZCcgdXNlcjphdXRvLWNvbHVtbj0nc2hlZXRfbGluayc-CiAgICAgICAgPGdyb3VwZmlsdGVyIGZ1bmN0aW9uPSdjcm9zc2pvaW4nPgogICAgICAgICAgPGdyb3VwZmlsdGVyIGZ1bmN0aW9uPSdsZXZlbC1tZW1iZXJzJyBsZXZlbD0nW0NhbGN1bGF0aW9uXzY0MDExMDMxNzEyNTk3MjNdJyAvPgogICAgICAgICAgPGdyb3VwZmlsdGVyIGZ1bmN0aW9uPSdsZXZlbC1tZW1iZXJzJyBsZXZlbD0nW3lyOk9yZGVyIERhdGU6b2tdJyAvPgogICAgICAgICAgPGdyb3VwZmlsdGVyIGZ1bmN0aW9uPSdsZXZlbC1tZW1iZXJzJyBsZXZlbD0nW3R3azpPcmRlciBEYXRlOm9rXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoU2hpcCBTdGF0dXMsWUVBUihPcmRlciBEYXRlKSxXRUVLKE9yZGVyIERhdGUpKScgZGF0YXR5cGU9J3R1cGxlJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKFNoaXAgU3RhdHVzLFlFQVIoT3JkZXIgRGF0ZSksV0VFSyhPcmRlciBEYXRlKSldJyByb2xlPSdkaW1lbnNpb24nIHR5cGU9J25vbWluYWwnIHVzZXI6YXV0by1jb2x1bW49J3NoZWV0X2xpbmsnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbQ2FsY3VsYXRpb25fNjQwMTEwMzE3MTI1OTcyM10nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOkNhbGN1bGF0aW9uXzY0MDExMDMxNzEyNTk3MjM6bmtdJyBwaXZvdD0na2V5JyB0eXBlPSdub21pbmFsJyAvPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW1JlZ2lvbl0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOlJlZ2lvbjpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbU2hpcCBNb2RlXScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6U2hpcCBNb2RlOm5rXScgcGl2b3Q9J2tleScgdHlwZT0nbm9taW5hbCcgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tPcmRlciBEYXRlXScgZGVyaXZhdGlvbj0nUXVhcnRlcicgbmFtZT0nW3FyOk9yZGVyIERhdGU6b2tdJyBwaXZvdD0na2V5JyB0eXBlPSdvcmRpbmFsJyAvPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW09yZGVyIERhdGVdJyBkZXJpdmF0aW9uPSdZZWFyJyBuYW1lPSdbe
XI6T3JkZXIgRGF0ZTpva10nIHBpdm90PSdrZXknIHR5cGU9J29yZGluYWwnIC8-CiAgICA8L2RhdGFzb3VyY2U-CiAgPC9kYXRhc291cmNlcz4KICA8d29ya3NoZWV0IG5hbWU9J1NoaXBTdW1tYXJ5Jz4KICAgIDx0YWJsZSAvPgogIDwvd29ya3NoZWV0PgogIDx3b3Jrc2hlZXQgbmFtZT0nU2hpcHBpbmdUcmVuZCc-CiAgICA8dGFibGUgLz4KICA8L3dvcmtzaGVldD4KICA8d29ya3NoZWV0IG5hbWU9J0RheXN0b1NoaXAnPgogICAgPHRhYmxlIC8-CiAgPC93b3Jrc2hlZXQ-CjwvY3VzdG9taXplZC12aWV3Pgo=" + }, + { + "isSourceView": false, + "viewName": "Performance", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IHNvdXJjZS1idWlsZD0nMjAyNC4yLjAgKDIwMjQyLjI0LjA3MTYuMTk0NCknIHZlcnNpb249JzE4LjEnIHhtbG5zOnVzZXI9J2h0dHA6Ly93d3cudGFibGVhdXNvZnR3YXJlLmNvbS94bWwvdXNlcic-CiAgPGRhdGFzb3VyY2VzPgogICAgPGRhdGFzb3VyY2UgbmFtZT0nZmVkZXJhdGVkLjEwbm5rOGQxdmdtdzhxMTd5dTc2dTA2cG5iY2onPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW1JlZ2lvbl0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOlJlZ2lvbjpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbT3JkZXIgRGF0ZV0nIGRlcml2YXRpb249J1llYXInIG5hbWU9J1t5cjpPcmRlciBEYXRlOm9rXScgcGl2b3Q9J2tleScgdHlwZT0nb3JkaW5hbCcgLz4KICAgIDwvZGF0YXNvdXJjZT4KICA8L2RhdGFzb3VyY2VzPgogIDx3b3Jrc2hlZXQgbmFtZT0nUGVyZm9ybWFuY2UnPgogICAgPHRhYmxlIC8-CiAgPC93b3Jrc2hlZXQ-CjwvY3VzdG9taXplZC12aWV3Pgo=" + }, + { + "isSourceView": false, + "viewName": "Commission Model", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IGRhc2hib2FyZD0nQ29tbWlzc2lvbiBNb2RlbCcgc291cmNlLWJ1aWxkPScyMDI0LjIuMCAoMjAyNDIuMjQuMDcxNi4xOTQ0KScgdmVyc2lvbj0nMTguMScgeG1sbnM6dXNlcj0naHR0cDovL3d3dy50YWJsZWF1c29mdHdhcmUuY29tL3htbC91c2VyJz4KICA8YWN0aXZlIGlkPSctMScgLz4KICA8ZGF0YXNvdXJjZXM-CiAgICA8ZGF0YXNvdXJjZSBuYW1lPSdmZWRlcmF0ZWQuMGEwMWNvZDFveGw4M2wxZjV5dmVzMWNmY2lxbyc-CiAgICAgIDxjb2x1bW4gZGF0YXR5cGU9J3N0cmluZycgbmFtZT0nWzpNZWFzdXJlIE5hbWVzXScgcm9sZT0nZGltZW5zaW9uJyB0eXBlPSdub21pbmFsJyAvPgogICAgPC9kYXRhc291cmNlPgogIDwvZGF0YXNvdXJjZXM-CiAgPHdvcmtzaGVldCBuYW1lPSdRdW90YUF0dGFpbm1lbnQnPgogICAgPHRhYmxlIC8-CiAgPC93b3Jrc2hlZXQ-CiAgPHdvcmtzaGVldCBuYW1lPSdDb21taXNzaW9uUHJvamVjdGlvbic-CiAgICA8dGFibGUgLz4KICA8L3dvcmtzaGVldD4KICA8d29ya3NoZWV0IG5hbWU9J1NhbGVzJz4KICAgIDx0YWJsZSAvPgogIDwvd29ya3NoZWV0PgogIDx3b3Jrc2hlZXQgbmFtZT0nT1RFJz4KICAgIDx0YWJsZSAvPgogIDwvd29ya3NoZWV0Pgo8L2N1c3RvbWl6ZWQtdmlldz4K" + }, + { + "isSourceView": false, + "viewName": "Order Details", + "tcv": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IGRhc2hib2FyZD0nT3JkZXIgRGV0YWlscycgc291cmNlLWJ1aWxkPScyMDI0LjIuMCAoMjAyNDIuMjQuMDcxNi4xOTQ0KScgdmVyc2lvbj0nMTguMScgeG1sbnM6dXNlcj0naHR0cDovL3d3dy50YWJsZWF1c29mdHdhcmUuY29tL3htbC91c2VyJz4KICA8YWN0aXZlIGlkPSctMScgLz4KICA8ZGF0YXNvdXJjZXM-CiAgICA8ZGF0YXNvdXJjZSBuYW1lPSdmZWRlcmF0ZWQuMTBubms4ZDF2Z213OHExN3l1NzZ1MDZwbmJjaic-CiAgICAgIDxjb2x1bW4gZGF0YXR5cGU9J3N0cmluZycgbmFtZT0nWzpNZWFzdXJlIE5hbWVzXScgcm9sZT0nZGltZW5zaW9uJyB0eXBlPSdub21pbmFsJz4KICAgICAgICA8YWxpYXNlcz4KICAgICAgICAgIDxhbGlhcyBrZXk9JyZxdW90O1tmZWRlcmF0ZWQuMTBubms4ZDF2Z213OHExN3l1NzZ1MDZwbmJjal0uW2N0ZDpDdXN0b21lciBOYW1lOnFrXSZxdW90OycgdmFsdWU9J0NvdW50IG9mIEN1c3RvbWVycycgLz4KICAgICAgICA8L2FsaWFzZXM-CiAgICAgIDwvY29sdW1uPgogICAgICA8Z3JvdXAgY2FwdGlvbj0nQWN0aW9uIChPcmRlciBQcm9maXRhYmxlPyxNT05USChPcmRlciBEYXRlKSxTZWdtZW50KScgaGlkZGVuPSd0cnVlJyBuYW1lPSdbQWN0aW9uIChPcmRlciBQcm9maXRhYmxlPyxNT05USChPcmRlciBEYXRlKSxTZWdtZW50KV0nIG5hbWUtc3R5bGU9J3VucXVhbGlmaWVkJyB1c2VyOmF1dG8tY29sdW1uPSdzaGVldF9saW5rJz4KICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2Nyb3Nzam9pbic-CiAgICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2xldmVsLW1lbWJlcnMnIGxldmVsPSdbQ2FsY3VsYXRpb25fOTA2MDEyMjEwNDk0NzQ3MV0nIC8-CiAgICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2xldmVsLW1lbWJlcnMnIGxldmVsPSdbdG1uOk9yZGVyIERhdGU6b2tdJyAvPgogICAgICAgICAgPGdyb3VwZmlsdGVyIGZ1bmN0aW9uPSdsZXZlbC1tZW1iZXJzJyBsZXZlbD0nW1NlZ21lbnRdJyAvPgogICAgICAgIDwvZ3JvdXBmaWx0ZXI-CiAgICAgIDwvZ3JvdXA-CiAgICAgIDxjb2x1bW4gY2FwdGlvbj0nQWN0aW9uIChPcmRlciBQcm9maXRhYmxlPyxNT05USChPcmRlciBEYXRlKSxTZWdtZW50KScgZGF0YXR5cGU9J3R1cGxlJyBoaWRkZW49J3RydWUnIG5hbWU9J1tBY3Rpb24gKE9yZGVyIFByb2ZpdGFibGU_LE1PTlRIKE9yZGVyIERhdGUpLFNlZ21lbnQpXScgcm9sZT0nZGltZW5zaW9uJyB0eXBlPSdub21pbmFsJyB1c2VyOmF1dG8tY29sdW1uPSdzaGVldF9saW5rJyAvPgogICAgICA8Z3JvdXAgY2FwdGlvbj0nQWN0aW9uIChQb3N0YWwgQ29kZSxTdGF0ZS9Qcm92aW5jZSkgMScgaGlkZGVuPSd0cnVlJyBuYW1lPSdbQWN0aW9uIChQb3N0YWwgQ29kZSxTdGF0ZS9Qcm92aW5jZSkgMV0nIG5hbWUtc3R5bGU9J3VucXVhbGlmaWVkJyB1c2VyOmF1dG8tY29sdW1uPSdzaGVldF9saW5rJz4KICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2Nyb3Nzam9pbic-CiAgICAgICAgICA8Z3JvdXBmaWx0ZXIgZnVuY3Rpb249J2xldmVsLW1lbWJlcnMnIGxldmVsPSdbUG9zdGFsIENvZGVdJyAvPgogICAgICAgICAgPGdyb3VwZmlsdGVyIGZ1bmN0aW9uPSdsZXZlbC1tZW1iZXJzJyBsZXZlbD0nW1N0YXRlL1Byb3ZpbmNlXScgLz4KICAgICAgICA8L2dyb3VwZmlsdGVyPgogICAgICA8L2dyb3VwPgogICAgICA8Y29sdW1uIGNhcHRpb249J0FjdGlvbiAoUG9zdGFsIENvZGUsU3RhdGUvUHJvdmluY2UpIDEnIGRhdGF0eXBlPSd0dXBsZScgaGlkZGVuPSd0cnVlJyBuYW1lPSdbQWN0aW9uIChQb3N0YWwgQ29kZSxTdGF0ZS9Qcm92aW5jZSkgMV0nIHJvbGU9J2RpbWVuc2lvbicgdHlwZT0nbm9taW5hbCcgdXNlcjphdXRvLWNvbHVtbj0nc2hlZXRfbGluaycgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tDYXRlZ29yeV0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOkNhdGVnb3J5Om5rXScgcGl2b3Q9J2tleScgdHlwZT0nbm9taW5hbCcgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tDaXR5XScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6Q2l0eTpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbT3JkZXIgRGF0ZV0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOk9yZGVyIERhdGU6b2tdJyBwaXZvdD0na2V5JyB0eXBlPSdvcmRpbmFsJyAvPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW09yZGVyIERhdGVdJyBkZXJpdmF0aW9uPSdOb25lJyBuYW1lPSdbbm9uZTpPcmRlciBEYXRlOnFrXScgcGl2b3Q9J2tleScgdHlwZT0ncXVhbnRpdGF0aXZlJyAvPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW1JlZ2lvbl0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOlJlZ2lvbjpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICAgIDxjb2x1bW4taW5zdGFuY2UgY29sdW1uPSdbU2VnbWVudF0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOlNlZ21lbnQ6bmtdJyBwaXZvdD0na2V5JyB0eXBlPSdub21pbmFsJyAvPgogICAgICA8Y29sdW1uLWluc3Rhbm
NlIGNvbHVtbj0nW1N0YXRlL1Byb3ZpbmNlXScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6U3RhdGUvUHJvdmluY2U6bmtdJyBwaXZvdD0na2V5JyB0eXBlPSdub21pbmFsJyAvPgogICAgPC9kYXRhc291cmNlPgogIDwvZGF0YXNvdXJjZXM-CiAgPHdvcmtzaGVldCBuYW1lPSdQcm9kdWN0IERldGFpbCBTaGVldCc-CiAgICA8dGFibGUgLz4KICA8L3dvcmtzaGVldD4KPC9jdXN0b21pemVkLXZpZXc-Cg==" + }, + { + "isSourceView": false, + "viewName": "Forecast", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IHNvdXJjZS1idWlsZD0nMjAyNC4yLjAgKDIwMjQyLjI0LjA3MTYuMTk0NCknIHZlcnNpb249JzE4LjEnIHhtbG5zOnVzZXI9J2h0dHA6Ly93d3cudGFibGVhdXNvZnR3YXJlLmNvbS94bWwvdXNlcic-CiAgPGRhdGFzb3VyY2VzPgogICAgPGRhdGFzb3VyY2UgbmFtZT0nZmVkZXJhdGVkLjEwbm5rOGQxdmdtdzhxMTd5dTc2dTA2cG5iY2onPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW09yZGVyIERhdGVdJyBkZXJpdmF0aW9uPSdOb25lJyBuYW1lPSdbbm9uZTpPcmRlciBEYXRlOnFrXScgcGl2b3Q9J2tleScgdHlwZT0ncXVhbnRpdGF0aXZlJyAvPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW1JlZ2lvbl0nIGRlcml2YXRpb249J05vbmUnIG5hbWU9J1tub25lOlJlZ2lvbjpua10nIHBpdm90PSdrZXknIHR5cGU9J25vbWluYWwnIC8-CiAgICA8L2RhdGFzb3VyY2U-CiAgPC9kYXRhc291cmNlcz4KICA8d29ya3NoZWV0IG5hbWU9J0ZvcmVjYXN0Jz4KICAgIDx0YWJsZSAvPgogIDwvd29ya3NoZWV0Pgo8L2N1c3RvbWl6ZWQtdmlldz4K" + }, + { + "isSourceView": false, + "viewName": "What If Forecast", + "tcv": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0ndXRmLTgnID8-Cgo8Y3VzdG9taXplZC12aWV3IHNvdXJjZS1idWlsZD0nMjAyNC4yLjAgKDIwMjQyLjI0LjA3MTYuMTk0NCknIHZlcnNpb249JzE4LjEnIHhtbG5zOnVzZXI9J2h0dHA6Ly93d3cudGFibGVhdXNvZnR3YXJlLmNvbS94bWwvdXNlcic-CiAgPGRhdGFzb3VyY2VzPgogICAgPGRhdGFzb3VyY2UgbmFtZT0nZmVkZXJhdGVkLjEwbm5rOGQxdmdtdzhxMTd5dTc2dTA2cG5iY2onPgogICAgICA8Y29sdW1uIGRhdGF0eXBlPSdzdHJpbmcnIG5hbWU9J1s6TWVhc3VyZSBOYW1lc10nIHJvbGU9J2RpbWVuc2lvbicgdHlwZT0nbm9taW5hbCc-CiAgICAgICAgPGFsaWFzZXM-CiAgICAgICAgICA8YWxpYXMga2V5PScmcXVvdDtbZmVkZXJhdGVkLjEwbm5rOGQxdmdtdzhxMTd5dTc2dTA2cG5iY2pdLltjdGQ6Q3VzdG9tZXIgTmFtZTpxa10mcXVvdDsnIHZhbHVlPSdDb3VudCBvZiBDdXN0b21lcnMnIC8-CiAgICAgICAgPC9hbGlhc2VzPgogICAgICA8L2NvbHVtbj4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tPcmRlciBEYXRlXScgZGVyaXZhdGlvbj0nTm9uZScgbmFtZT0nW25vbmU6T3JkZXIgRGF0ZTpxa10nIHBpdm90PSdrZXknIHR5cGU9J3F1YW50aXRhdGl2ZScgLz4KICAgICAgPGNvbHVtbi1pbnN0YW5jZSBjb2x1bW49J1tSZWdpb25dJyBkZXJpdmF0aW9uPSdOb25lJyBuYW1lPSdbbm9uZTpSZWdpb246bmtdJyBwaXZvdD0na2V5JyB0eXBlPSdub21pbmFsJyAvPgogICAgICA8Y29sdW1uLWluc3RhbmNlIGNvbHVtbj0nW09yZGVyIERhdGVdJyBkZXJpdmF0aW9uPSdZZWFyJyBuYW1lPSdbeXI6T3JkZXIgRGF0ZTpva10nIHBpdm90PSdrZXknIHR5cGU9J29yZGluYWwnIC8-CiAgICA8L2RhdGFzb3VyY2U-CiAgPC9kYXRhc291cmNlcz4KICA8d29ya3NoZWV0IG5hbWU9J1doYXQgSWYgRm9yZWNhc3QnPgogICAgPHRhYmxlIC8-CiAgPC93b3Jrc2hlZXQ-CjwvY3VzdG9taXplZC12aWV3Pgo=" + } +] \ No newline at end of file diff --git a/test/assets/custom_view_get.xml b/test/assets/custom_view_get.xml new file mode 100644 index 000000000..67e342f30 --- /dev/null +++ b/test/assets/custom_view_get.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/test/assets/custom_view_get_id.xml b/test/assets/custom_view_get_id.xml new file mode 100644 index 000000000..14e589b8d --- /dev/null +++ b/test/assets/custom_view_get_id.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/test/assets/custom_view_update.xml b/test/assets/custom_view_update.xml new file mode 100644 index 000000000..5ab85bc05 --- /dev/null +++ b/test/assets/custom_view_update.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/test/assets/data_acceleration_report.xml b/test/assets/data_acceleration_report.xml new file mode 100644 index 000000000..51b86a691 --- /dev/null +++ b/test/assets/data_acceleration_report.xml @@ -0,0 +1,20 @@ + + + + + + + \ No 
newline at end of file diff --git a/test/assets/data_alerts_add_user.xml b/test/assets/data_alerts_add_user.xml new file mode 100644 index 000000000..2a367a7f1 --- /dev/null +++ b/test/assets/data_alerts_add_user.xml @@ -0,0 +1,7 @@ + + + + \ No newline at end of file diff --git a/test/assets/data_alerts_get.xml b/test/assets/data_alerts_get.xml new file mode 100644 index 000000000..78a55d4ca --- /dev/null +++ b/test/assets/data_alerts_get.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/data_alerts_get_by_id.xml b/test/assets/data_alerts_get_by_id.xml new file mode 100644 index 000000000..1a7456545 --- /dev/null +++ b/test/assets/data_alerts_get_by_id.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/data_alerts_update.xml b/test/assets/data_alerts_update.xml new file mode 100644 index 000000000..78a55d4ca --- /dev/null +++ b/test/assets/data_alerts_update.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/database_get.xml b/test/assets/database_get.xml new file mode 100644 index 000000000..7d22daf4c --- /dev/null +++ b/test/assets/database_get.xml @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/database_populate_permissions.xml b/test/assets/database_populate_permissions.xml new file mode 100644 index 000000000..21f30fea9 --- /dev/null +++ b/test/assets/database_populate_permissions.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/database_update.xml b/test/assets/database_update.xml new file mode 100644 index 000000000..b2cbd68c9 --- /dev/null +++ b/test/assets/database_update.xml @@ -0,0 +1,9 @@ + + + + + + + + \ No newline at end of file diff --git a/test/assets/datasource_data_update.xml b/test/assets/datasource_data_update.xml new file mode 100644 index 000000000..305caaf0b --- /dev/null +++ b/test/assets/datasource_data_update.xml @@ -0,0 +1,9 @@ + + + + + + 7ecaccd8-39b0-4875-a77d-094f6e930019 + + + diff --git a/test/assets/datasource_get.xml b/test/assets/datasource_get.xml index c3ccfa0da..1c420d116 100644 --- a/test/assets/datasource_get.xml +++ b/test/assets/datasource_get.xml @@ -2,12 +2,12 @@ - + - + @@ -17,4 +17,4 @@ - \ No newline at end of file + diff --git a/test/assets/datasource_get_by_id.xml b/test/assets/datasource_get_by_id.xml index 177899b15..53434b8cc 100644 --- a/test/assets/datasource_get_by_id.xml +++ b/test/assets/datasource_get_by_id.xml @@ -1,6 +1,6 @@ - + @@ -8,5 +8,6 @@ + \ No newline at end of file diff --git a/test/assets/datasource_populate_connections.xml b/test/assets/datasource_populate_connections.xml index 442a78323..eaaa24934 100644 --- a/test/assets/datasource_populate_connections.xml +++ b/test/assets/datasource_populate_connections.xml @@ -1,8 +1,7 @@ - - - + + \ No newline at end of file diff --git a/test/assets/datasource_populate_permissions.xml b/test/assets/datasource_populate_permissions.xml new file mode 100644 index 000000000..db967f4a9 --- /dev/null +++ b/test/assets/datasource_populate_permissions.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/datasource_refresh.xml b/test/assets/datasource_refresh.xml new file mode 100644 index 000000000..61b4b7601 --- /dev/null +++ b/test/assets/datasource_refresh.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/test/assets/datasource_revision.xml b/test/assets/datasource_revision.xml new file 
mode 100644 index 000000000..8cadafc8f --- /dev/null +++ b/test/assets/datasource_revision.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/dqw_by_content_type.xml b/test/assets/dqw_by_content_type.xml new file mode 100644 index 000000000..c65deb6d9 --- /dev/null +++ b/test/assets/dqw_by_content_type.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/test/assets/favorites_add_datasource.xml b/test/assets/favorites_add_datasource.xml new file mode 100644 index 000000000..a1f47ab4f --- /dev/null +++ b/test/assets/favorites_add_datasource.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/favorites_add_project.xml b/test/assets/favorites_add_project.xml new file mode 100644 index 000000000..699e6a4cd --- /dev/null +++ b/test/assets/favorites_add_project.xml @@ -0,0 +1,11 @@ + + + + + + + + \ No newline at end of file diff --git a/test/assets/favorites_add_view.xml b/test/assets/favorites_add_view.xml new file mode 100644 index 000000000..f6fc15c9a --- /dev/null +++ b/test/assets/favorites_add_view.xml @@ -0,0 +1,14 @@ + + + + + + + + + \ No newline at end of file diff --git a/test/assets/favorites_add_workbook.xml b/test/assets/favorites_add_workbook.xml new file mode 100644 index 000000000..c8008c9b8 --- /dev/null +++ b/test/assets/favorites_add_workbook.xml @@ -0,0 +1,20 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/favorites_get.xml b/test/assets/favorites_get.xml new file mode 100644 index 000000000..3d2e2ee6a --- /dev/null +++ b/test/assets/favorites_get.xml @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/fileupload_append.xml b/test/assets/fileupload_append.xml new file mode 100644 index 000000000..325ee66a9 --- /dev/null +++ b/test/assets/fileupload_append.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/test/assets/fileupload_initialize.xml b/test/assets/fileupload_initialize.xml new file mode 100644 index 000000000..073ad0edc --- /dev/null +++ b/test/assets/fileupload_initialize.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/test/assets/flow_get.xml b/test/assets/flow_get.xml new file mode 100644 index 000000000..406cded8e --- /dev/null +++ b/test/assets/flow_get.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/flow_get_by_id.xml b/test/assets/flow_get_by_id.xml new file mode 100644 index 000000000..d1c626105 --- /dev/null +++ b/test/assets/flow_get_by_id.xml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/flow_populate_connections.xml b/test/assets/flow_populate_connections.xml new file mode 100644 index 000000000..5c013770c --- /dev/null +++ b/test/assets/flow_populate_connections.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/test/assets/flow_populate_permissions.xml b/test/assets/flow_populate_permissions.xml new file mode 100644 index 000000000..ce3a22f97 --- /dev/null +++ b/test/assets/flow_populate_permissions.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/flow_publish.xml b/test/assets/flow_publish.xml new file mode 100644 index 000000000..55af88d11 --- /dev/null +++ b/test/assets/flow_publish.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/test/assets/flow_refresh.xml b/test/assets/flow_refresh.xml 
new file mode 100644 index 000000000..b2bb97a5d --- /dev/null +++ b/test/assets/flow_refresh.xml @@ -0,0 +1,11 @@ + + + + + + + + \ No newline at end of file diff --git a/test/assets/flow_runs_get.xml b/test/assets/flow_runs_get.xml new file mode 100644 index 000000000..489e8ac63 --- /dev/null +++ b/test/assets/flow_runs_get.xml @@ -0,0 +1,18 @@ + + + + + + diff --git a/test/assets/flow_runs_get_by_id.xml b/test/assets/flow_runs_get_by_id.xml new file mode 100644 index 000000000..3a768fab4 --- /dev/null +++ b/test/assets/flow_runs_get_by_id.xml @@ -0,0 +1,10 @@ + + + + \ No newline at end of file diff --git a/test/assets/flow_runs_get_by_id_failed.xml b/test/assets/flow_runs_get_by_id_failed.xml new file mode 100644 index 000000000..9e766680b --- /dev/null +++ b/test/assets/flow_runs_get_by_id_failed.xml @@ -0,0 +1,10 @@ + + + + \ No newline at end of file diff --git a/test/assets/flow_runs_get_by_id_inprogress.xml b/test/assets/flow_runs_get_by_id_inprogress.xml new file mode 100644 index 000000000..42e1a77f9 --- /dev/null +++ b/test/assets/flow_runs_get_by_id_inprogress.xml @@ -0,0 +1,10 @@ + + + + \ No newline at end of file diff --git a/test/assets/flow_update.xml b/test/assets/flow_update.xml new file mode 100644 index 000000000..5ab69f583 --- /dev/null +++ b/test/assets/flow_update.xml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/group_add_users.xml b/test/assets/group_add_users.xml new file mode 100644 index 000000000..23fd7bd9f --- /dev/null +++ b/test/assets/group_add_users.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/test/assets/group_create.xml b/test/assets/group_create.xml index 8fb3902a4..face05cf0 100644 --- a/test/assets/group_create.xml +++ b/test/assets/group_create.xml @@ -2,5 +2,7 @@ - + \ No newline at end of file diff --git a/test/assets/group_create_ad.xml b/test/assets/group_create_ad.xml new file mode 100644 index 000000000..26ddd94b0 --- /dev/null +++ b/test/assets/group_create_ad.xml @@ -0,0 +1,10 @@ + + + + + + \ No newline at end of file diff --git a/test/assets/group_update.xml b/test/assets/group_update.xml index b5dba4bc6..3c54524c0 100644 --- a/test/assets/group_update.xml +++ b/test/assets/group_update.xml @@ -2,5 +2,7 @@ - + + + \ No newline at end of file diff --git a/test/assets/group_update_async.xml b/test/assets/group_update_async.xml new file mode 100644 index 000000000..ea6b47eaa --- /dev/null +++ b/test/assets/group_update_async.xml @@ -0,0 +1,10 @@ + + + + diff --git a/test/assets/groupsets_create.xml b/test/assets/groupsets_create.xml new file mode 100644 index 000000000..233b0f939 --- /dev/null +++ b/test/assets/groupsets_create.xml @@ -0,0 +1,4 @@ + + + + diff --git a/test/assets/groupsets_get.xml b/test/assets/groupsets_get.xml new file mode 100644 index 000000000..ff3bec1fb --- /dev/null +++ b/test/assets/groupsets_get.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/test/assets/groupsets_get_by_id.xml b/test/assets/groupsets_get_by_id.xml new file mode 100644 index 000000000..558e4d870 --- /dev/null +++ b/test/assets/groupsets_get_by_id.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/test/assets/groupsets_update.xml b/test/assets/groupsets_update.xml new file mode 100644 index 000000000..b64fa6ea1 --- /dev/null +++ b/test/assets/groupsets_update.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/test/assets/job_get_by_id.xml b/test/assets/job_get_by_id.xml new file mode 100644 index 000000000..b142dfe2f --- /dev/null +++ b/test/assets/job_get_by_id.xml @@ -0,0 +1,14 @@ + + + 
+ + Job detail notes + + + More detail + + + diff --git a/test/assets/job_get_by_id_failed.xml b/test/assets/job_get_by_id_failed.xml new file mode 100644 index 000000000..c7456008e --- /dev/null +++ b/test/assets/job_get_by_id_failed.xml @@ -0,0 +1,9 @@ + + + + + + c569ee62-9204-416f-843d-5ccfebc0231b + + + \ No newline at end of file diff --git a/test/assets/job_get_by_id_failed_workbook.xml b/test/assets/job_get_by_id_failed_workbook.xml new file mode 100644 index 000000000..bf81d896e --- /dev/null +++ b/test/assets/job_get_by_id_failed_workbook.xml @@ -0,0 +1,9 @@ + + + + + + java.lang.RuntimeException: [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Login failed for user.\nIntegrated authentication failed. + + + diff --git a/test/assets/job_get_by_id_inprogress.xml b/test/assets/job_get_by_id_inprogress.xml new file mode 100644 index 000000000..7a23fb99d --- /dev/null +++ b/test/assets/job_get_by_id_inprogress.xml @@ -0,0 +1,9 @@ + + + + + + c569ee62-9204-416f-843d-5ccfebc0231b + + + \ No newline at end of file diff --git a/test/assets/linked_tasks_get.xml b/test/assets/linked_tasks_get.xml new file mode 100644 index 000000000..23b7bbbbc --- /dev/null +++ b/test/assets/linked_tasks_get.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + diff --git a/test/assets/linked_tasks_run_now.xml b/test/assets/linked_tasks_run_now.xml new file mode 100644 index 000000000..63cef73b1 --- /dev/null +++ b/test/assets/linked_tasks_run_now.xml @@ -0,0 +1,7 @@ + + + + diff --git a/test/assets/metadata_paged_1.json b/test/assets/metadata_paged_1.json new file mode 100644 index 000000000..c1cc0318e --- /dev/null +++ b/test/assets/metadata_paged_1.json @@ -0,0 +1,15 @@ +{ + "data": { + "publishedDatasourcesConnection": { + "pageInfo": { + "hasNextPage": true, + "endCursor": "eyJ0eXBlIjoiUHVibGlzaGVkRGF0YXNvdXJjZSIsInNjb3BlIjoic2l0ZXMvMSIsInNvcnRPcmRlclZhbHVlIjp7Imxhc3RJZCI6IjAwMzllNWQ1LTI1ZmEtMTk2Yi1jNjZlLWMwNjc1ODM5ZTBiMCJ9fQ==" + }, + "nodes": [ + { + "id": "0039e5d5-25fa-196b-c66e-c0675839e0b0" + } + ] + } + } +} \ No newline at end of file diff --git a/test/assets/metadata_paged_2.json b/test/assets/metadata_paged_2.json new file mode 100644 index 000000000..af9601d59 --- /dev/null +++ b/test/assets/metadata_paged_2.json @@ -0,0 +1,15 @@ +{ + "data": { + "publishedDatasourcesConnection": { + "pageInfo": { + "hasNextPage": true, + "endCursor": "eyJ0eXBlIjoiUHVibGlzaGVkRGF0YXNvdXJjZSIsInNjb3BlIjoic2l0ZXMvMSIsInNvcnRPcmRlclZhbHVlIjp7Imxhc3RJZCI6IjAwYjE5MWNlLTYwNTUtYWZmNS1lMjc1LWMyNjYxMGM4YzRkNiJ9fQ==" + }, + "nodes": [ + { + "id": "00b191ce-6055-aff5-e275-c26610c8c4d6" + } + ] + } + } +} \ No newline at end of file diff --git a/test/assets/metadata_paged_3.json b/test/assets/metadata_paged_3.json new file mode 100644 index 000000000..958a408ea --- /dev/null +++ b/test/assets/metadata_paged_3.json @@ -0,0 +1,15 @@ +{ + "data": { + "publishedDatasourcesConnection": { + "pageInfo": { + "hasNextPage": false, + "endCursor": "eyJ0eXBlIjoiUHVibGlzaGVkRGF0YXNvdXJjZSIsInNjb3BlIjoic2l0ZXMvMSIsInNvcnRPcmRlclZhbHVlIjp7Imxhc3RJZCI6IjAyZjNlNGQ4LTg1NmEtZGEzNi1mNmM1LWM5MDA5NDVjNTdiOSJ9fQ==" + }, + "nodes": [ + { + "id": "02f3e4d8-856a-da36-f6c5-c900945c57b9" + } + ] + } + } +} \ No newline at end of file diff --git a/test/assets/metadata_query_error.json b/test/assets/metadata_query_error.json new file mode 100644 index 000000000..1c575ee23 --- /dev/null +++ b/test/assets/metadata_query_error.json @@ -0,0 +1,29 @@ +{ + "data": { + "publishedDatasources": [ + { + "id": "01cf92b2-2d17-b656-fc48-5c25ef6d5352", + 
"name": "Batters (TestV1)" + }, + { + "id": "020ae1cd-c356-f1ad-a846-b0094850d22a", + "name": "SharePoint_List_sharepoint2010.test.tsi.lan" + }, + { + "id": "061493a0-c3b2-6f39-d08c-bc3f842b44af", + "name": "Batters_mongodb" + }, + { + "id": "089fe515-ad2f-89bc-94bd-69f55f69a9c2", + "name": "Sample - Superstore" + } + ] + }, + "errors": [ + { + "message": "Reached time limit of PT5S for query execution.", + "path": null, + "extensions": null + } + ] +} \ No newline at end of file diff --git a/test/assets/metadata_query_expected_dict.dict b/test/assets/metadata_query_expected_dict.dict new file mode 100644 index 000000000..241b333d4 --- /dev/null +++ b/test/assets/metadata_query_expected_dict.dict @@ -0,0 +1,9 @@ +{'pages': [{'data': {'publishedDatasourcesConnection': {'nodes': [{'id': '0039e5d5-25fa-196b-c66e-c0675839e0b0'}], + 'pageInfo': {'endCursor': 'eyJ0eXBlIjoiUHVibGlzaGVkRGF0YXNvdXJjZSIsInNjb3BlIjoic2l0ZXMvMSIsInNvcnRPcmRlclZhbHVlIjp7Imxhc3RJZCI6IjAwMzllNWQ1LTI1ZmEtMTk2Yi1jNjZlLWMwNjc1ODM5ZTBiMCJ9fQ==', + 'hasNextPage': True}}}}, + {'data': {'publishedDatasourcesConnection': {'nodes': [{'id': '00b191ce-6055-aff5-e275-c26610c8c4d6'}], + 'pageInfo': {'endCursor': 'eyJ0eXBlIjoiUHVibGlzaGVkRGF0YXNvdXJjZSIsInNjb3BlIjoic2l0ZXMvMSIsInNvcnRPcmRlclZhbHVlIjp7Imxhc3RJZCI6IjAwYjE5MWNlLTYwNTUtYWZmNS1lMjc1LWMyNjYxMGM4YzRkNiJ9fQ==', + 'hasNextPage': True}}}}, + {'data': {'publishedDatasourcesConnection': {'nodes': [{'id': '02f3e4d8-856a-da36-f6c5-c900945c57b9'}], + 'pageInfo': {'endCursor': 'eyJ0eXBlIjoiUHVibGlzaGVkRGF0YXNvdXJjZSIsInNjb3BlIjoic2l0ZXMvMSIsInNvcnRPcmRlclZhbHVlIjp7Imxhc3RJZCI6IjAyZjNlNGQ4LTg1NmEtZGEzNi1mNmM1LWM5MDA5NDVjNTdiOSJ9fQ==', + 'hasNextPage': False}}}}]} \ No newline at end of file diff --git a/test/assets/metadata_query_success.json b/test/assets/metadata_query_success.json new file mode 100644 index 000000000..056f29fb6 --- /dev/null +++ b/test/assets/metadata_query_success.json @@ -0,0 +1,22 @@ +{ + "data": { + "publishedDatasources": [ + { + "id": "01cf92b2-2d17-b656-fc48-5c25ef6d5352", + "name": "Batters (TestV1)" + }, + { + "id": "020ae1cd-c356-f1ad-a846-b0094850d22a", + "name": "SharePoint_List_sharepoint2010.test.tsi.lan" + }, + { + "id": "061493a0-c3b2-6f39-d08c-bc3f842b44af", + "name": "Batters_mongodb" + }, + { + "id": "089fe515-ad2f-89bc-94bd-69f55f69a9c2", + "name": "Sample - Superstore" + } + ] + } + } \ No newline at end of file diff --git a/test/assets/metrics_get.xml b/test/assets/metrics_get.xml new file mode 100644 index 000000000..566af1074 --- /dev/null +++ b/test/assets/metrics_get.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/metrics_get_by_id.xml b/test/assets/metrics_get_by_id.xml new file mode 100644 index 000000000..30652da0f --- /dev/null +++ b/test/assets/metrics_get_by_id.xml @@ -0,0 +1,16 @@ + + + + + + + + + \ No newline at end of file diff --git a/test/assets/metrics_update.xml b/test/assets/metrics_update.xml new file mode 100644 index 000000000..30652da0f --- /dev/null +++ b/test/assets/metrics_update.xml @@ -0,0 +1,16 @@ + + + + + + + + + \ No newline at end of file diff --git a/test/assets/odata_connection.xml b/test/assets/odata_connection.xml new file mode 100644 index 000000000..0c16fcca6 --- /dev/null +++ b/test/assets/odata_connection.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/assets/populate_excel.xlsx b/test/assets/populate_excel.xlsx new file mode 100644 index 000000000..3cf6115c7 Binary files /dev/null and b/test/assets/populate_excel.xlsx differ diff --git 
a/test/assets/populate_powerpoint.pptx b/test/assets/populate_powerpoint.pptx new file mode 100644 index 000000000..dbf979c06 Binary files /dev/null and b/test/assets/populate_powerpoint.pptx differ diff --git a/test/assets/project_content_permission.xml b/test/assets/project_content_permission.xml new file mode 100644 index 000000000..18341e2ac --- /dev/null +++ b/test/assets/project_content_permission.xml @@ -0,0 +1,4 @@ + + + + diff --git a/test/assets/project_get.xml b/test/assets/project_get.xml index bd2d6e01e..ce604cd8f 100644 --- a/test/assets/project_get.xml +++ b/test/assets/project_get.xml @@ -2,11 +2,12 @@ - - + + - - + + + \ No newline at end of file diff --git a/test/assets/project_populate_permissions.xml b/test/assets/project_populate_permissions.xml new file mode 100644 index 000000000..7a49391af --- /dev/null +++ b/test/assets/project_populate_permissions.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/project_populate_workbook_default_permissions.xml b/test/assets/project_populate_workbook_default_permissions.xml new file mode 100644 index 000000000..e6f3804be --- /dev/null +++ b/test/assets/project_populate_workbook_default_permissions.xml @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/project_update.xml b/test/assets/project_update.xml index eaa884627..f2485c898 100644 --- a/test/assets/project_update.xml +++ b/test/assets/project_update.xml @@ -1,4 +1,6 @@ - + + + diff --git a/test/assets/project_update_datasource_default_permissions.xml b/test/assets/project_update_datasource_default_permissions.xml new file mode 100644 index 000000000..3a70031ce --- /dev/null +++ b/test/assets/project_update_datasource_default_permissions.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/queryset_slicing_page_1.xml b/test/assets/queryset_slicing_page_1.xml new file mode 100644 index 000000000..be3df91f8 --- /dev/null +++ b/test/assets/queryset_slicing_page_1.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/queryset_slicing_page_2.xml b/test/assets/queryset_slicing_page_2.xml new file mode 100644 index 000000000..058bbd5c0 --- /dev/null +++ b/test/assets/queryset_slicing_page_2.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/request_option_filter_name_in.xml b/test/assets/request_option_filter_name_in.xml new file mode 100644 index 000000000..9ec42b8ab --- /dev/null +++ b/test/assets/request_option_filter_name_in.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/request_option_slicing_queryset.xml b/test/assets/request_option_slicing_queryset.xml new file mode 100644 index 000000000..34708c911 --- /dev/null +++ b/test/assets/request_option_slicing_queryset.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/schedule_add_datasource.xml b/test/assets/schedule_add_datasource.xml new file mode 100644 index 000000000..e57d2c8d2 --- /dev/null +++ b/test/assets/schedule_add_datasource.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/test/assets/schedule_add_flow.xml b/test/assets/schedule_add_flow.xml new file 
mode 100644 index 000000000..9934c38e5 --- /dev/null +++ b/test/assets/schedule_add_flow.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/test/assets/schedule_add_workbook.xml b/test/assets/schedule_add_workbook.xml new file mode 100644 index 000000000..a6adb005e --- /dev/null +++ b/test/assets/schedule_add_workbook.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/test/assets/schedule_add_workbook_with_warnings.xml b/test/assets/schedule_add_workbook_with_warnings.xml new file mode 100644 index 000000000..1eac2ceef --- /dev/null +++ b/test/assets/schedule_add_workbook_with_warnings.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/schedule_create_weekly.xml b/test/assets/schedule_create_weekly.xml index 624a56e25..a12a6eace 100644 --- a/test/assets/schedule_create_weekly.xml +++ b/test/assets/schedule_create_weekly.xml @@ -9,4 +9,8 @@ + + + + \ No newline at end of file diff --git a/test/assets/schedule_get.xml b/test/assets/schedule_get.xml index 66e4d6e51..db5e1a05e 100644 --- a/test/assets/schedule_get.xml +++ b/test/assets/schedule_get.xml @@ -5,5 +5,6 @@ + \ No newline at end of file diff --git a/test/assets/schedule_get_by_id.xml b/test/assets/schedule_get_by_id.xml new file mode 100644 index 000000000..943416beb --- /dev/null +++ b/test/assets/schedule_get_by_id.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/test/assets/schedule_get_daily_id.xml b/test/assets/schedule_get_daily_id.xml new file mode 100644 index 000000000..99467a391 --- /dev/null +++ b/test/assets/schedule_get_daily_id.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/schedule_get_hourly_id.xml b/test/assets/schedule_get_hourly_id.xml new file mode 100644 index 000000000..27c374ccf --- /dev/null +++ b/test/assets/schedule_get_hourly_id.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/schedule_get_monthly_id.xml b/test/assets/schedule_get_monthly_id.xml new file mode 100644 index 000000000..3fc32cc57 --- /dev/null +++ b/test/assets/schedule_get_monthly_id.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/schedule_get_monthly_id_2.xml b/test/assets/schedule_get_monthly_id_2.xml new file mode 100644 index 000000000..ca84297e7 --- /dev/null +++ b/test/assets/schedule_get_monthly_id_2.xml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/schedule_update.xml b/test/assets/schedule_update.xml index 314925377..7b814fdbc 100644 --- a/test/assets/schedule_update.xml +++ b/test/assets/schedule_update.xml @@ -1,6 +1,6 @@ - + diff --git a/test/assets/server_info_get.xml b/test/assets/server_info_get.xml index ce4e0b322..94218502a 100644 --- a/test/assets/server_info_get.xml +++ b/test/assets/server_info_get.xml @@ -1,6 +1,6 @@ 10.1.0 -2.4 +3.10 - \ No newline at end of file + diff --git a/test/assets/server_info_wrong_site.html b/test/assets/server_info_wrong_site.html new file mode 100644 index 000000000..e92daeb2d --- /dev/null +++ b/test/assets/server_info_wrong_site.html @@ -0,0 +1,56 @@ + + + + + + Example website + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ ABCDE
12345
23456
34567
45678
56789
+ + + \ No newline at end of file diff --git a/test/assets/site_create.xml b/test/assets/site_create.xml index 9fafb5f02..9d9c4a009 100644 --- a/test/assets/site_create.xml +++ b/test/assets/site_create.xml @@ -1,4 +1,4 @@ - + \ No newline at end of file diff --git a/test/assets/site_get.xml b/test/assets/site_get.xml index e3c7a781c..7ffa91eb7 100644 --- a/test/assets/site_get.xml +++ b/test/assets/site_get.xml @@ -2,7 +2,7 @@ - - + + \ No newline at end of file diff --git a/test/assets/site_get_by_id.xml b/test/assets/site_get_by_id.xml index 98bc3e4e6..a8a1e9a5c 100644 --- a/test/assets/site_get_by_id.xml +++ b/test/assets/site_get_by_id.xml @@ -1,4 +1,4 @@ - - \ No newline at end of file + + diff --git a/test/assets/site_get_by_name.xml b/test/assets/site_get_by_name.xml index 5b3042e61..b7ae2b595 100644 --- a/test/assets/site_get_by_name.xml +++ b/test/assets/site_get_by_name.xml @@ -1,5 +1,4 @@ - - \ No newline at end of file + + diff --git a/test/assets/site_update.xml b/test/assets/site_update.xml index 716314d29..1661a426b 100644 --- a/test/assets/site_update.xml +++ b/test/assets/site_update.xml @@ -1,4 +1,4 @@ - - \ No newline at end of file + + diff --git a/test/assets/subscription_get.xml b/test/assets/subscription_get.xml index d038c8419..b66ffc927 100644 --- a/test/assets/subscription_get.xml +++ b/test/assets/subscription_get.xml @@ -4,13 +4,13 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://tableau.com/api http://tableau.com/api/ts-api-2.6.xsd"> - - + + - - + + diff --git a/test/assets/table_get.xml b/test/assets/table_get.xml new file mode 100644 index 000000000..0bd2763d5 --- /dev/null +++ b/test/assets/table_get.xml @@ -0,0 +1,21 @@ + + + + + + + +
+ + + +
+ + +
+ + +
+
+
\ No newline at end of file diff --git a/test/assets/table_update.xml b/test/assets/table_update.xml new file mode 100644 index 000000000..975f0cedb --- /dev/null +++ b/test/assets/table_update.xml @@ -0,0 +1,8 @@ + + + + + +
+
\ No newline at end of file diff --git a/test/assets/tasks_create_extract_task.xml b/test/assets/tasks_create_extract_task.xml new file mode 100644 index 000000000..9e6310fba --- /dev/null +++ b/test/assets/tasks_create_extract_task.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/tasks_create_flow_task.xml b/test/assets/tasks_create_flow_task.xml new file mode 100644 index 000000000..11c9a4ff0 --- /dev/null +++ b/test/assets/tasks_create_flow_task.xml @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/tasks_no_workbook_or_datasource.xml b/test/assets/tasks_no_workbook_or_datasource.xml index 7ddbcae62..da84194bf 100644 --- a/test/assets/tasks_no_workbook_or_datasource.xml +++ b/test/assets/tasks_no_workbook_or_datasource.xml @@ -4,17 +4,17 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://tableau.com/api http://tableau.com/api/ts-api-2.6.xsd"> - + - + - + diff --git a/test/assets/tasks_run_now_response.xml b/test/assets/tasks_run_now_response.xml new file mode 100644 index 000000000..6a8860cd7 --- /dev/null +++ b/test/assets/tasks_run_now_response.xml @@ -0,0 +1,6 @@ + + + + diff --git a/test/assets/tasks_with_dataacceleration_task.xml b/test/assets/tasks_with_dataacceleration_task.xml new file mode 100644 index 000000000..beb5d59eb --- /dev/null +++ b/test/assets/tasks_with_dataacceleration_task.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + 2019-12-09T20:45:04Z + + + + \ No newline at end of file diff --git a/test/assets/tasks_with_datasource.xml b/test/assets/tasks_with_datasource.xml index 68e23a417..097161bf7 100644 --- a/test/assets/tasks_with_datasource.xml +++ b/test/assets/tasks_with_datasource.xml @@ -4,7 +4,7 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://tableau.com/api http://tableau.com/api/ts-api-2.6.xsd"> - + diff --git a/test/assets/tasks_with_interval.xml b/test/assets/tasks_with_interval.xml new file mode 100644 index 000000000..a317408fb --- /dev/null +++ b/test/assets/tasks_with_interval.xml @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/tasks_with_workbook.xml b/test/assets/tasks_with_workbook.xml index 1565abf74..81e974e78 100644 --- a/test/assets/tasks_with_workbook.xml +++ b/test/assets/tasks_with_workbook.xml @@ -4,7 +4,7 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://tableau.com/api http://tableau.com/api/ts-api-2.6.xsd"> - + diff --git a/test/assets/tasks_with_workbook_and_datasource.xml b/test/assets/tasks_with_workbook_and_datasource.xml index 4389fa06c..81777bb46 100644 --- a/test/assets/tasks_with_workbook_and_datasource.xml +++ b/test/assets/tasks_with_workbook_and_datasource.xml @@ -4,19 +4,19 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://tableau.com/api http://tableau.com/api/ts-api-2.6.xsd"> - + - + - + diff --git a/test/assets/tasks_without_schedule.xml b/test/assets/tasks_without_schedule.xml new file mode 100644 index 000000000..e669bf67f --- /dev/null +++ b/test/assets/tasks_without_schedule.xml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/user_get.xml b/test/assets/user_get.xml index 3165c3a4f..83557b2eb 100644 --- a/test/assets/user_get.xml +++ b/test/assets/user_get.xml @@ -2,7 +2,7 @@ - - + + \ No newline at end of file diff --git a/test/assets/user_populate_groups.xml 
b/test/assets/user_populate_groups.xml new file mode 100644 index 000000000..567f1dbf8 --- /dev/null +++ b/test/assets/user_populate_groups.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/test/assets/view_get.xml b/test/assets/view_get.xml index 36f43e255..283488a4b 100644 --- a/test/assets/view_get.xml +++ b/test/assets/view_get.xml @@ -6,11 +6,15 @@ + + + + - + - \ No newline at end of file + diff --git a/test/assets/view_get_id.xml b/test/assets/view_get_id.xml new file mode 100644 index 000000000..6110a0a3a --- /dev/null +++ b/test/assets/view_get_id.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/test/assets/view_get_id_usage.xml b/test/assets/view_get_id_usage.xml new file mode 100644 index 000000000..a0cdd98db --- /dev/null +++ b/test/assets/view_get_id_usage.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/test/assets/view_get_usage.xml b/test/assets/view_get_usage.xml index a6844879d..741e607e7 100644 --- a/test/assets/view_get_usage.xml +++ b/test/assets/view_get_usage.xml @@ -8,11 +8,11 @@ - + - \ No newline at end of file + diff --git a/test/assets/view_populate_permissions.xml b/test/assets/view_populate_permissions.xml new file mode 100644 index 000000000..e73616f46 --- /dev/null +++ b/test/assets/view_populate_permissions.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/view_update_permissions.xml b/test/assets/view_update_permissions.xml new file mode 100644 index 000000000..2e78a4a90 --- /dev/null +++ b/test/assets/view_update_permissions.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/virtual_connection_add_permissions.xml b/test/assets/virtual_connection_add_permissions.xml new file mode 100644 index 000000000..d8b052848 --- /dev/null +++ b/test/assets/virtual_connection_add_permissions.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/virtual_connection_database_connection_update.xml b/test/assets/virtual_connection_database_connection_update.xml new file mode 100644 index 000000000..a6135d604 --- /dev/null +++ b/test/assets/virtual_connection_database_connection_update.xml @@ -0,0 +1,6 @@ + + + + diff --git a/test/assets/virtual_connection_populate_connections.xml b/test/assets/virtual_connection_populate_connections.xml new file mode 100644 index 000000000..77d899520 --- /dev/null +++ b/test/assets/virtual_connection_populate_connections.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/test/assets/virtual_connections_download.xml b/test/assets/virtual_connections_download.xml new file mode 100644 index 000000000..889e70ce7 --- /dev/null +++ b/test/assets/virtual_connections_download.xml @@ -0,0 +1,7 @@ + + + + + {"policyCollection":{"luid":"34ae5eb9-ceac-4158-86f1-a5d8163d5261","policies":[]},"revision":{"luid":"1b2e2aae-b904-4f5a-aa4d-9f114b8e5f57","revisableProperties":{}}} + + diff --git a/test/assets/virtual_connections_get.xml b/test/assets/virtual_connections_get.xml new file mode 100644 index 000000000..f1f410e4c --- /dev/null +++ b/test/assets/virtual_connections_get.xml @@ -0,0 +1,14 @@ + + + + + + + diff --git a/test/assets/virtual_connections_publish.xml b/test/assets/virtual_connections_publish.xml new file mode 100644 index 000000000..889e70ce7 --- /dev/null +++ b/test/assets/virtual_connections_publish.xml @@ -0,0 +1,7 @@ + + + + + 
{"policyCollection":{"luid":"34ae5eb9-ceac-4158-86f1-a5d8163d5261","policies":[]},"revision":{"luid":"1b2e2aae-b904-4f5a-aa4d-9f114b8e5f57","revisableProperties":{}}} + + diff --git a/test/assets/virtual_connections_revisions.xml b/test/assets/virtual_connections_revisions.xml new file mode 100644 index 000000000..374113427 --- /dev/null +++ b/test/assets/virtual_connections_revisions.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/test/assets/virtual_connections_update.xml b/test/assets/virtual_connections_update.xml new file mode 100644 index 000000000..60d5d1697 --- /dev/null +++ b/test/assets/virtual_connections_update.xml @@ -0,0 +1,8 @@ + + + + + + + diff --git a/test/assets/webhook_create.xml b/test/assets/webhook_create.xml new file mode 100644 index 000000000..24a5ca99b --- /dev/null +++ b/test/assets/webhook_create.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/webhook_create_request.xml b/test/assets/webhook_create_request.xml new file mode 100644 index 000000000..0578c2c48 --- /dev/null +++ b/test/assets/webhook_create_request.xml @@ -0,0 +1 @@ + diff --git a/test/assets/webhook_get.xml b/test/assets/webhook_get.xml new file mode 100644 index 000000000..7d527fc00 --- /dev/null +++ b/test/assets/webhook_get.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/test/assets/workbook_get.xml b/test/assets/workbook_get.xml index 6a753f70c..873ca3848 100644 --- a/test/assets/workbook_get.xml +++ b/test/assets/workbook_get.xml @@ -2,13 +2,12 @@ - + - - + diff --git a/test/assets/workbook_get_by_id.xml b/test/assets/workbook_get_by_id.xml index 13bb76523..98dfc4a75 100644 --- a/test/assets/workbook_get_by_id.xml +++ b/test/assets/workbook_get_by_id.xml @@ -1,6 +1,6 @@ - + @@ -11,4 +11,4 @@ - \ No newline at end of file + diff --git a/test/assets/workbook_get_by_id_acceleration_status.xml b/test/assets/workbook_get_by_id_acceleration_status.xml new file mode 100644 index 000000000..0d1f9b93d --- /dev/null +++ b/test/assets/workbook_get_by_id_acceleration_status.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/workbook_get_by_id_personal.xml b/test/assets/workbook_get_by_id_personal.xml new file mode 100644 index 000000000..90cc65e73 --- /dev/null +++ b/test/assets/workbook_get_by_id_personal.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/test/assets/workbook_get_invalid_date.xml b/test/assets/workbook_get_invalid_date.xml new file mode 100644 index 000000000..c580f9eb6 --- /dev/null +++ b/test/assets/workbook_get_invalid_date.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_populate_permissions.xml b/test/assets/workbook_populate_permissions.xml new file mode 100644 index 000000000..57517d719 --- /dev/null +++ b/test/assets/workbook_populate_permissions.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/workbook_publish.xml b/test/assets/workbook_publish.xml index dcfc79936..3e23bda71 100644 --- a/test/assets/workbook_publish.xml +++ b/test/assets/workbook_publish.xml @@ -1,6 +1,6 @@ - + @@ -8,4 +8,4 @@ - \ No newline at end of file + diff --git a/test/assets/workbook_refresh.xml b/test/assets/workbook_refresh.xml new file mode 100644 index 000000000..6f5da8283 --- /dev/null +++ b/test/assets/workbook_refresh.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_revision.xml b/test/assets/workbook_revision.xml new file 
mode 100644 index 000000000..8cadafc8f --- /dev/null +++ b/test/assets/workbook_revision.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update.xml b/test/assets/workbook_update.xml index 7a72759d8..6e5d36105 100644 --- a/test/assets/workbook_update.xml +++ b/test/assets/workbook_update.xml @@ -4,6 +4,6 @@ - + \ No newline at end of file diff --git a/test/assets/workbook_update_acceleration_status.xml b/test/assets/workbook_update_acceleration_status.xml new file mode 100644 index 000000000..7c3366fee --- /dev/null +++ b/test/assets/workbook_update_acceleration_status.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_data_freshness_policy.xml b/test/assets/workbook_update_data_freshness_policy.xml new file mode 100644 index 000000000..a69a097ba --- /dev/null +++ b/test/assets/workbook_update_data_freshness_policy.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_data_freshness_policy2.xml b/test/assets/workbook_update_data_freshness_policy2.xml new file mode 100644 index 000000000..384f79ec0 --- /dev/null +++ b/test/assets/workbook_update_data_freshness_policy2.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_data_freshness_policy3.xml b/test/assets/workbook_update_data_freshness_policy3.xml new file mode 100644 index 000000000..195013517 --- /dev/null +++ b/test/assets/workbook_update_data_freshness_policy3.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_data_freshness_policy4.xml b/test/assets/workbook_update_data_freshness_policy4.xml new file mode 100644 index 000000000..8208d986a --- /dev/null +++ b/test/assets/workbook_update_data_freshness_policy4.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_data_freshness_policy5.xml b/test/assets/workbook_update_data_freshness_policy5.xml new file mode 100644 index 000000000..b6e0358b6 --- /dev/null +++ b/test/assets/workbook_update_data_freshness_policy5.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_data_freshness_policy6.xml b/test/assets/workbook_update_data_freshness_policy6.xml new file mode 100644 index 000000000..c8be8f6c1 --- /dev/null +++ b/test/assets/workbook_update_data_freshness_policy6.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/assets/workbook_update_permissions.xml b/test/assets/workbook_update_permissions.xml new file mode 100644 index 000000000..fffd90491 --- /dev/null +++ b/test/assets/workbook_update_permissions.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/test/assets/workbook_update_views_acceleration_status.xml b/test/assets/workbook_update_views_acceleration_status.xml new file mode 100644 index 000000000..f2055fb79 --- /dev/null +++ b/test/assets/workbook_update_views_acceleration_status.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/http/test_http_requests.py b/test/http/test_http_requests.py new file mode 100644 index 000000000..ce845502d --- /dev/null +++ b/test/http/test_http_requests.py @@ -0,0 +1,117 @@ +import tableauserverclient as TSC +import unittest +import requests +import requests_mock + +from unittest import mock +from 
requests.exceptions import MissingSchema + + +# This method will be used by the mock to replace requests.get +def mocked_requests_get(*args, **kwargs): + class MockResponse: + def __init__(self, status_code): + self.headers = {} + self.encoding = None + self.content = ( + "" + "" + "0.31" + "0.31" + "2022.3" + "" + "" + ) + self.status_code = status_code + + return MockResponse(200) + + +class ServerTests(unittest.TestCase): + def test_init_server_model_empty_throws(self): + with self.assertRaises(TypeError): + server = TSC.Server() + + def test_init_server_model_no_protocol_defaults_http(self): + server = TSC.Server("fake-url") + + def test_init_server_model_valid_server_name_works(self): + server = TSC.Server("http://fake-url") + + def test_init_server_model_valid_https_server_name_works(self): + # by default, it will just set the version to 2.3 + server = TSC.Server("https://fake-url") + + def test_init_server_model_bad_server_name_not_version_check(self): + server = TSC.Server("fake-url", use_server_version=False) + + @mock.patch("requests.sessions.Session.get", side_effect=mocked_requests_get) + def test_init_server_model_bad_server_name_do_version_check(self, mock_get): + server = TSC.Server("fake-url", use_server_version=True) + + def test_init_server_model_bad_server_name_not_version_check_random_options(self): + # with self.assertRaises(MissingSchema): + server = TSC.Server("fake-url", use_server_version=False, http_options={"foo": 1}) + + def test_init_server_model_bad_server_name_not_version_check_real_options(self): + # with self.assertRaises(ValueError): + server = TSC.Server("fake-url", use_server_version=False, http_options={"verify": False}) + + def test_http_options_skip_ssl_works(self): + http_options = {"verify": False} + server = TSC.Server("http://fake-url") + server.add_http_options(http_options) + + def test_http_options_multiple_options_works(self): + http_options = {"verify": False, "birdname": "Parrot"} + server = TSC.Server("http://fake-url") + server.add_http_options(http_options) + + # ValueError: dictionary update sequence element #0 has length 1; 2 is required + def test_http_options_multiple_dicts_fails(self): + http_options_1 = {"verify": False} + http_options_2 = {"birdname": "Parrot"} + server = TSC.Server("http://fake-url") + with self.assertRaises(ValueError): + server.add_http_options([http_options_1, http_options_2]) + + # TypeError: cannot convert dictionary update sequence element #0 to a sequence + def test_http_options_not_sequence_fails(self): + server = TSC.Server("http://fake-url") + with self.assertRaises(ValueError): + server.add_http_options({1, 2, 3}) + + def test_validate_connection_http(self): + url = "http://cookies.com" + server = TSC.Server(url) + server.validate_connection_settings() + self.assertEqual(url, server.server_address) + + def test_validate_connection_https(self): + url = "https://cookies.com" + server = TSC.Server(url) + server.validate_connection_settings() + self.assertEqual(url, server.server_address) + + def test_validate_connection_no_protocol(self): + url = "cookies.com" + fixed_url = "http://cookies.com" + server = TSC.Server(url) + server.validate_connection_settings() + self.assertEqual(fixed_url, server.server_address) + + +class SessionTests(unittest.TestCase): + test_header = {"x-test": "true"} + + @staticmethod + def session_factory(): + session = requests.session() + session.headers.update(SessionTests.test_header) + return session + + def test_session_factory_adds_headers(self): + test_request_bin =
"http://capture-this-with-mock.com" + with requests_mock.mock() as m: + m.get(url="http://capture-this-with-mock.com/api/2.4/serverInfo", request_headers=SessionTests.test_header) + server = TSC.Server(test_request_bin, use_server_version=True, session_factory=SessionTests.session_factory) diff --git a/test/models/_models.py b/test/models/_models.py new file mode 100644 index 000000000..59011c6c3 --- /dev/null +++ b/test/models/_models.py @@ -0,0 +1,58 @@ +from tableauserverclient import * + +# TODO why aren't these available in the tsc namespace? Probably a bug. +from tableauserverclient.models import ( + DataAccelerationReportItem, + Credentials, + ServerInfoItem, + Resource, + TableauItem, +) + + +def get_defined_models(): + # nothing clever here: list was manually copied from tsc/models/__init__.py + return [ + BackgroundJobItem, + ConnectionItem, + DataAccelerationReportItem, + DataAlertItem, + DatasourceItem, + FlowItem, + GroupItem, + JobItem, + MetricItem, + PermissionsRule, + ProjectItem, + RevisionItem, + ScheduleItem, + SubscriptionItem, + Credentials, + JWTAuth, + TableauAuth, + PersonalAccessTokenAuth, + ServerInfoItem, + SiteItem, + TaskItem, + UserItem, + ViewItem, + WebhookItem, + WorkbookItem, + PaginationItem, + Permission.Mode, + Permission.Capability, + DailyInterval, + WeeklyInterval, + MonthlyInterval, + HourlyInterval, + TableItem, + Target, + ] + + +def get_unimplemented_models(): + return [ + FavoriteItem, # no repr because there is no state + Resource, # list of type names + TableauItem, # should be an interface + ] diff --git a/test/models/test_repr.py b/test/models/test_repr.py new file mode 100644 index 000000000..92d11978f --- /dev/null +++ b/test/models/test_repr.py @@ -0,0 +1,51 @@ +import inspect + +from unittest import TestCase +import _models # type: ignore # did not set types for this +import tableauserverclient as TSC + +from typing import Any + + +# ensure that all models that don't need parameters can be instantiated +# todo.... 
+def instantiate_class(name: str, obj: Any): + # Get the constructor (init) of the class + constructor = getattr(obj, "__init__", None) + if constructor: + # Get the parameters of the constructor (excluding 'self') + parameters = inspect.signature(constructor).parameters.values() + required_parameters = [ + param for param in parameters if param.default == inspect.Parameter.empty and param.name != "self" + ] + if required_parameters: + print(f"Class '{name}' requires the following parameters for instantiation:") + for param in required_parameters: + print(f"- {param.name}") + else: + print(f"Class '{name}' does not require any parameters for instantiation.") + # Instantiate the class + instance = obj() + print(f"Instantiated: {name} -> {instance}") + else: + print(f"Class '{name}' does not have a constructor (__init__ method).") + + +class TestAllModels(TestCase): + # not all models have __repr__ yet: see above list + def test_repr_is_implemented(self): + m = _models.get_defined_models() + for model in m: + with self.subTest(model.__name__, model=model): + print(model.__name__, type(model.__repr__).__name__) + self.assertEqual(type(model.__repr__).__name__, "function") + + # 2 - Iterate through the objects in the module + def test_by_reflection(self): + for class_name, obj in inspect.getmembers(TSC, is_concrete): + with self.subTest(class_name, obj=obj): + instantiate_class(class_name, obj) + + +def is_concrete(obj: Any): + return inspect.isclass(obj) and not inspect.isabstract(obj) diff --git a/test/request_factory/__init__.py b/test/request_factory/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/request_factory/test_datasource_requests.py b/test/request_factory/test_datasource_requests.py new file mode 100644 index 000000000..75bb535d5 --- /dev/null +++ b/test/request_factory/test_datasource_requests.py @@ -0,0 +1,15 @@ +import unittest +import tableauserverclient as TSC +import tableauserverclient.server.request_factory as TSC_RF +from tableauserverclient import DatasourceItem + + +class DatasourceRequestTests(unittest.TestCase): + def test_generate_xml(self): + datasource_item: TSC.DatasourceItem = TSC.DatasourceItem("name") + datasource_item.name = "a ds" + datasource_item.description = "described" + datasource_item.use_remote_query_agent = False + datasource_item.ask_data_enablement = DatasourceItem.AskDataEnablement.Enabled + datasource_item.project_id = "testval" + TSC_RF.RequestFactory.Datasource._generate_xml(datasource_item) diff --git a/test/request_factory/test_workbook_requests.py b/test/request_factory/test_workbook_requests.py new file mode 100644 index 000000000..332b6defa --- /dev/null +++ b/test/request_factory/test_workbook_requests.py @@ -0,0 +1,55 @@ +import unittest +import tableauserverclient as TSC +import tableauserverclient.server.request_factory as TSC_RF +from tableauserverclient.helpers.strings import redact_xml +import pytest +import sys + + +class WorkbookRequestTests(unittest.TestCase): + def test_embedded_extract_req(self): + include_all = True + embedded_datasources = None + xml_result = TSC_RF.RequestFactory.Workbook.embedded_extract_req(include_all, embedded_datasources) + + def test_generate_xml(self): + workbook_item: TSC.WorkbookItem = TSC.WorkbookItem("name", "project_id") + TSC_RF.RequestFactory.Workbook._generate_xml(workbook_item) + + def test_generate_xml_invalid_connection(self): + workbook_item: TSC.WorkbookItem = TSC.WorkbookItem("name", "project_id") + conn = TSC.ConnectionItem() + with 
self.assertRaises(ValueError): + request = TSC_RF.RequestFactory.Workbook._generate_xml(workbook_item, connections=[conn]) + + def test_generate_xml_invalid_connection_credentials(self): + workbook_item: TSC.WorkbookItem = TSC.WorkbookItem("name", "project_id") + conn = TSC.ConnectionItem() + conn.server_address = "address" + creds = TSC.ConnectionCredentials("username", "password") + creds.name = None + conn.connection_credentials = creds + with self.assertRaises(ValueError): + request = TSC_RF.RequestFactory.Workbook._generate_xml(workbook_item, connections=[conn]) + + def test_generate_xml_valid_connection_credentials(self): + workbook_item: TSC.WorkbookItem = TSC.WorkbookItem("name", "project_id") + conn = TSC.ConnectionItem() + conn.server_address = "address" + creds = TSC.ConnectionCredentials("username", "DELETEME") + conn.connection_credentials = creds + request = TSC_RF.RequestFactory.Workbook._generate_xml(workbook_item, connections=[conn]) + assert request.find(b"DELETEME") > 0 + + def test_redact_passwords_in_xml(self): + if sys.version_info < (3, 7): + pytest.skip("Redaction is only implemented for 3.7+.") + workbook_item: TSC.WorkbookItem = TSC.WorkbookItem("name", "project_id") + conn = TSC.ConnectionItem() + conn.server_address = "address" + creds = TSC.ConnectionCredentials("username", "DELETEME") + conn.connection_credentials = creds + request = TSC_RF.RequestFactory.Workbook._generate_xml(workbook_item, connections=[conn]) + redacted = redact_xml(request) + assert request.find(b"DELETEME") > 0, request + assert redacted.find(b"DELETEME") == -1, redacted diff --git a/test/test_auth.py b/test/test_auth.py index 870064db0..09e3e251d 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -1,71 +1,133 @@ -import unittest import os.path +import unittest + import requests_mock + import tableauserverclient as TSC -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") -SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml') -SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml') -SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml') +SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, "auth_sign_in.xml") +SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, "auth_sign_in_impersonate.xml") +SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, "auth_sign_in_error.xml") class AuthTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server('http://test') + self.server = TSC.Server("http://test", False) self.baseurl = self.server.auth.baseurl def test_sign_in(self): - with open(SIGN_IN_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + with open(SIGN_IN_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '/signin', text=response_xml) - tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples') + m.post(self.baseurl + "/signin", text=response_xml) + tableau_auth = TSC.TableauAuth("testuser", "password", site_id="Samples") self.server.auth.sign_in(tableau_auth) - self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token) - self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id) - self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id) + self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token) + self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id) + 
self.assertEqual("Samples", self.server.site_url) + self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id) + + def test_sign_in_with_personal_access_tokens(self): + with open(SIGN_IN_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl + "/signin", text=response_xml) + tableau_auth = TSC.PersonalAccessTokenAuth( + token_name="mytoken", personal_access_token="Random123Generated", site_id="Samples" + ) + self.server.auth.sign_in(tableau_auth) + + self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token) + self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id) + self.assertEqual("Samples", self.server.site_url) + self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id) def test_sign_in_impersonate(self): - with open(SIGN_IN_IMPERSONATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + with open(SIGN_IN_IMPERSONATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '/signin', text=response_xml) - tableau_auth = TSC.TableauAuth('testuser', 'password', - user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794') + m.post(self.baseurl + "/signin", text=response_xml) + tableau_auth = TSC.TableauAuth( + "testuser", "password", user_id_to_impersonate="dd2239f6-ddf1-4107-981a-4cf94e415794" + ) self.server.auth.sign_in(tableau_auth) - self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgzwq3', self.server.auth_token) - self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id) - self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id) + self.assertEqual("MJonFA6HDyy2C3oqR13fRGqE6cmgzwq3", self.server.auth_token) + self.assertEqual("dad65087-b08b-4603-af4e-2887b8aafc67", self.server.site_id) + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", self.server.user_id) def test_sign_in_error(self): - with open(SIGN_IN_ERROR_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + with open(SIGN_IN_ERROR_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '/signin', text=response_xml, status_code=401) - tableau_auth = TSC.TableauAuth('testuser', 'wrongpassword') - self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth) + m.post(self.baseurl + "/signin", text=response_xml, status_code=401) + tableau_auth = TSC.TableauAuth("testuser", "wrongpassword") + self.assertRaises(TSC.FailedSignInError, self.server.auth.sign_in, tableau_auth) + + def test_sign_in_invalid_token(self): + with open(SIGN_IN_ERROR_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl + "/signin", text=response_xml, status_code=401) + tableau_auth = TSC.PersonalAccessTokenAuth(token_name="mytoken", personal_access_token="invalid") + self.assertRaises(TSC.FailedSignInError, self.server.auth.sign_in, tableau_auth) def test_sign_in_without_auth(self): - with open(SIGN_IN_ERROR_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + with open(SIGN_IN_ERROR_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '/signin', text=response_xml, status_code=401) - tableau_auth = TSC.TableauAuth('', '') - self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth) + m.post(self.baseurl + "/signin", text=response_xml, status_code=401) + tableau_auth = 
TSC.TableauAuth("", "") + self.assertRaises(TSC.FailedSignInError, self.server.auth.sign_in, tableau_auth) def test_sign_out(self): - with open(SIGN_IN_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + with open(SIGN_IN_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '/signin', text=response_xml) - m.post(self.baseurl + '/signout', text='') - tableau_auth = TSC.TableauAuth('testuser', 'password') + m.post(self.baseurl + "/signin", text=response_xml) + m.post(self.baseurl + "/signout", text="") + tableau_auth = TSC.TableauAuth("testuser", "password") self.server.auth.sign_in(tableau_auth) self.server.auth.sign_out() self.assertIsNone(self.server._auth_token) self.assertIsNone(self.server._site_id) + self.assertIsNone(self.server._site_url) self.assertIsNone(self.server._user_id) + + def test_switch_site(self): + self.server.version = "2.6" + baseurl = self.server.auth.baseurl + site_id, user_id, auth_token = list("123") + self.server._set_auth(site_id, user_id, auth_token) + with open(SIGN_IN_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(baseurl + "/switchSite", text=response_xml) + site = TSC.SiteItem("Samples", "Samples") + self.server.auth.switch_site(site) + + self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token) + self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id) + self.assertEqual("Samples", self.server.site_url) + self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id) + + def test_revoke_all_server_admin_tokens(self): + self.server.version = "3.10" + baseurl = self.server.auth.baseurl + with open(SIGN_IN_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(baseurl + "/signin", text=response_xml) + m.post(baseurl + "/revokeAllServerAdminTokens", text="") + tableau_auth = TSC.TableauAuth("testuser", "password") + self.server.auth.sign_in(tableau_auth) + self.server.auth.revoke_all_server_admin_tokens() + + self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token) + self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id) + self.assertEqual("Samples", self.server.site_url) + self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id) diff --git a/test/test_connection_.py b/test/test_connection_.py new file mode 100644 index 000000000..47b796ebe --- /dev/null +++ b/test/test_connection_.py @@ -0,0 +1,34 @@ +import unittest +import tableauserverclient as TSC + + +class DatasourceModelTests(unittest.TestCase): + def test_require_boolean_query_tag_fails(self): + conn = TSC.ConnectionItem() + conn._connection_type = "postgres" + with self.assertRaises(ValueError): + conn.query_tagging = "no" + + def test_set_query_tag_normal_conn(self): + conn = TSC.ConnectionItem() + conn._connection_type = "postgres" + conn.query_tagging = True + self.assertEqual(conn.query_tagging, True) + + def test_ignore_query_tag_for_hyper(self): + conn = TSC.ConnectionItem() + conn._connection_type = "hyper" + conn.query_tagging = True + self.assertEqual(conn.query_tagging, None) + + def test_ignore_query_tag_for_teradata(self): + conn = TSC.ConnectionItem() + conn._connection_type = "teradata" + conn.query_tagging = True + self.assertEqual(conn.query_tagging, None) + + def test_ignore_query_tag_for_snowflake(self): + conn = TSC.ConnectionItem() + conn._connection_type = "snowflake" + conn.query_tagging = 
True + self.assertEqual(conn.query_tagging, None) diff --git a/test/test_custom_view.py b/test/test_custom_view.py new file mode 100644 index 000000000..6e863a863 --- /dev/null +++ b/test/test_custom_view.py @@ -0,0 +1,320 @@ +from contextlib import ExitStack +import io +import os +from pathlib import Path +from tempfile import TemporaryDirectory +import unittest + +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.config import BYTES_PER_MB +from tableauserverclient.datetime_helpers import format_datetime +from tableauserverclient.server.endpoint.exceptions import MissingRequiredFieldError + +TEST_ASSET_DIR = Path(__file__).parent / "assets" + +GET_XML = os.path.join(TEST_ASSET_DIR, "custom_view_get.xml") +GET_XML_ID = os.path.join(TEST_ASSET_DIR, "custom_view_get_id.xml") +POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, "Sample View Image.png") +CUSTOM_VIEW_UPDATE_XML = os.path.join(TEST_ASSET_DIR, "custom_view_update.xml") +CUSTOM_VIEW_POPULATE_PDF = os.path.join(TEST_ASSET_DIR, "populate_pdf.pdf") +CUSTOM_VIEW_POPULATE_CSV = os.path.join(TEST_ASSET_DIR, "populate_csv.csv") +CUSTOM_VIEW_DOWNLOAD = TEST_ASSET_DIR / "custom_view_download.json" +FILE_UPLOAD_INIT = TEST_ASSET_DIR / "fileupload_initialize.xml" +FILE_UPLOAD_APPEND = TEST_ASSET_DIR / "fileupload_append.xml" + + +class CustomViewTests(unittest.TestCase): + def setUp(self): + self.server = TSC.Server("http://test", False) + self.server.version = "3.21" # custom views only introduced in 3.19 + + # Fake sign in + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.custom_views.baseurl + + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + print(response_xml) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_views, pagination_item = self.server.custom_views.get() + + self.assertEqual(2, pagination_item.total_available) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", all_views[0].id) + self.assertEqual("ENDANGERED SAFARI", all_views[0].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", all_views[0].content_url) + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", all_views[0].workbook.id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_views[0].owner.id) + self.assertIsNone(all_views[0].created_at) + self.assertIsNone(all_views[0].updated_at) + self.assertFalse(all_views[0].shared) + + self.assertEqual("fd252f73-593c-4c4e-8584-c032b8022adc", all_views[1].id) + self.assertEqual("Overview", all_views[1].name) + self.assertEqual("6d13b0ca-043d-4d42-8c9d-3f3313ea3a00", all_views[1].workbook.id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_views[1].owner.id) + self.assertEqual("2002-05-30T09:00:00Z", format_datetime(all_views[1].created_at)) + self.assertEqual("2002-06-05T08:00:59Z", format_datetime(all_views[1].updated_at)) + self.assertTrue(all_views[1].shared) + + def test_get_by_id(self) -> None: + with open(GET_XML_ID, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5", text=response_xml) + view: TSC.CustomViewItem = self.server.custom_views.get_by_id("d79634e1-6063-4ec9-95ff-50acbf609ff5") + + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", view.id) + self.assertEqual("ENDANGERED SAFARI", view.name) + 
self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", view.content_url) + if view.workbook: + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", view.workbook.id) + if view.owner: + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", view.owner.id) + if view.view: + self.assertEqual("5241e88d-d384-4fd7-9c2f-648b5247efc5", view.view.id) + self.assertEqual("2002-05-30T09:00:00Z", format_datetime(view.created_at)) + self.assertEqual("2002-06-05T08:00:59Z", format_datetime(view.updated_at)) + + def test_get_by_id_missing_id(self) -> None: + self.assertRaises(TSC.MissingRequiredFieldError, self.server.custom_views.get_by_id, None) + + def test_get_before_signin(self) -> None: + self.server._auth_token = None + self.assertRaises(TSC.NotSignedInError, self.server.custom_views.get) + + def test_populate_image(self) -> None: + with open(POPULATE_PREVIEW_IMAGE, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/image", content=response) + single_view = TSC.CustomViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + self.server.custom_views.populate_image(single_view) + self.assertEqual(response, single_view.image) + + def test_populate_image_with_options(self) -> None: + with open(POPULATE_PREVIEW_IMAGE, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get( + self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/image?resolution=high&maxAge=10", content=response + ) + single_view = TSC.CustomViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + req_option = TSC.ImageRequestOptions(imageresolution=TSC.ImageRequestOptions.Resolution.High, maxage=10) + self.server.custom_views.populate_image(single_view, req_option) + self.assertEqual(response, single_view.image) + + def test_populate_image_missing_id(self) -> None: + single_view = TSC.CustomViewItem() + single_view._id = None + self.assertRaises(TSC.MissingRequiredFieldError, self.server.custom_views.populate_image, single_view) + + def test_delete(self) -> None: + with requests_mock.mock() as m: + m.delete(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42", status_code=204) + self.server.custom_views.delete("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_delete_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.custom_views.delete, "") + + def test_update(self) -> None: + with open(CUSTOM_VIEW_UPDATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + the_custom_view = TSC.CustomViewItem("1d0304cd-3796-429f-b815-7258370b9b74", name="Best test ever") + the_custom_view._id = "1f951daf-4061-451a-9df1-69a8062664f2" + the_custom_view.owner = TSC.UserItem() + the_custom_view.owner.id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + the_custom_view = self.server.custom_views.update(the_custom_view) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", the_custom_view.id) + if the_custom_view.owner: + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", the_custom_view.owner.id) + self.assertEqual("Best test ever", the_custom_view.name) + + def test_update_missing_id(self) -> None: + cv = TSC.CustomViewItem(name="test") + self.assertRaises(TSC.MissingRequiredFieldError, self.server.custom_views.update, cv) + + def test_download(self) -> None: + cv = TSC.CustomViewItem(name="test") + cv._id = "1f951daf-4061-451a-9df1-69a8062664f2" + content = 
CUSTOM_VIEW_DOWNLOAD.read_bytes() + data = io.BytesIO() + with requests_mock.mock() as m: + m.get(f"{self.server.custom_views.expurl}/1f951daf-4061-451a-9df1-69a8062664f2/content", content=content) + self.server.custom_views.download(cv, data) + + assert data.getvalue() == content + + def test_publish_filepath(self) -> None: + cv = TSC.CustomViewItem(name="test") + cv._owner = TSC.UserItem() + cv._owner._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + cv._workbook = TSC.WorkbookItem() + cv._workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + with requests_mock.mock() as m: + m.post(self.server.custom_views.expurl, status_code=201, text=Path(GET_XML).read_text()) + view = self.server.custom_views.publish(cv, CUSTOM_VIEW_DOWNLOAD) + + assert view is not None + assert isinstance(view, TSC.CustomViewItem) + assert view.id is not None + assert view.name is not None + + def test_publish_file_str(self) -> None: + cv = TSC.CustomViewItem(name="test") + cv._owner = TSC.UserItem() + cv._owner._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + cv._workbook = TSC.WorkbookItem() + cv._workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + with requests_mock.mock() as m: + m.post(self.server.custom_views.expurl, status_code=201, text=Path(GET_XML).read_text()) + view = self.server.custom_views.publish(cv, str(CUSTOM_VIEW_DOWNLOAD)) + + assert view is not None + assert isinstance(view, TSC.CustomViewItem) + assert view.id is not None + assert view.name is not None + + def test_publish_file_io(self) -> None: + cv = TSC.CustomViewItem(name="test") + cv._owner = TSC.UserItem() + cv._owner._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + cv._workbook = TSC.WorkbookItem() + cv._workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + data = io.BytesIO(CUSTOM_VIEW_DOWNLOAD.read_bytes()) + with requests_mock.mock() as m: + m.post(self.server.custom_views.expurl, status_code=201, text=Path(GET_XML).read_text()) + view = self.server.custom_views.publish(cv, data) + + assert view is not None + assert isinstance(view, TSC.CustomViewItem) + assert view.id is not None + assert view.name is not None + + def test_publish_missing_owner_id(self) -> None: + cv = TSC.CustomViewItem(name="test") + cv._owner = TSC.UserItem() + cv._workbook = TSC.WorkbookItem() + cv._workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + with requests_mock.mock() as m: + m.post(self.server.custom_views.expurl, status_code=201, text=Path(GET_XML).read_text()) + with self.assertRaises(ValueError): + self.server.custom_views.publish(cv, CUSTOM_VIEW_DOWNLOAD) + + def test_publish_missing_wb_id(self) -> None: + cv = TSC.CustomViewItem(name="test") + cv._owner = TSC.UserItem() + cv._owner._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + cv._workbook = TSC.WorkbookItem() + with requests_mock.mock() as m: + m.post(self.server.custom_views.expurl, status_code=201, text=Path(GET_XML).read_text()) + with self.assertRaises(ValueError): + self.server.custom_views.publish(cv, CUSTOM_VIEW_DOWNLOAD) + + def test_large_publish(self): + cv = TSC.CustomViewItem(name="test") + cv._owner = TSC.UserItem() + cv._owner._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + cv._workbook = TSC.WorkbookItem() + cv._workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + with ExitStack() as stack: + temp_dir = stack.enter_context(TemporaryDirectory()) + file_path = Path(temp_dir) / "test_file" + file_path.write_bytes(os.urandom(65 * BYTES_PER_MB)) + mock = stack.enter_context(requests_mock.mock()) + # Mock initializing upload + mock.post(self.server.fileuploads.baseurl, 
status_code=201, text=FILE_UPLOAD_INIT.read_text()) + # Mock the upload + mock.put( + f"{self.server.fileuploads.baseurl}/7720:170fe6b1c1c7422dadff20f944d58a52-1:0", + text=FILE_UPLOAD_APPEND.read_text(), + ) + # Mock the publish + mock.post(self.server.custom_views.expurl, status_code=201, text=Path(GET_XML).read_text()) + + view = self.server.custom_views.publish(cv, file_path) + + assert view is not None + assert isinstance(view, TSC.CustomViewItem) + assert view.id is not None + assert view.name is not None + + def test_populate_pdf(self) -> None: + self.server.version = "3.23" + self.baseurl = self.server.custom_views.baseurl + with open(CUSTOM_VIEW_POPULATE_PDF, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get( + self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/pdf?type=letter&orientation=portrait&maxAge=5", + content=response, + ) + custom_view = TSC.CustomViewItem() + custom_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + + size = TSC.PDFRequestOptions.PageType.Letter + orientation = TSC.PDFRequestOptions.Orientation.Portrait + req_option = TSC.PDFRequestOptions(size, orientation, 5) + + self.server.custom_views.populate_pdf(custom_view, req_option) + self.assertEqual(response, custom_view.pdf) + + def test_populate_csv(self) -> None: + self.server.version = "3.23" + self.baseurl = self.server.custom_views.baseurl + with open(CUSTOM_VIEW_POPULATE_CSV, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/data?maxAge=1", content=response) + custom_view = TSC.CustomViewItem() + custom_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + request_option = TSC.CSVRequestOptions(maxage=1) + self.server.custom_views.populate_csv(custom_view, request_option) + + csv_file = b"".join(custom_view.csv) + self.assertEqual(response, csv_file) + + def test_populate_csv_default_maxage(self) -> None: + self.server.version = "3.23" + self.baseurl = self.server.custom_views.baseurl + with open(CUSTOM_VIEW_POPULATE_CSV, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/data", content=response) + custom_view = TSC.CustomViewItem() + custom_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + self.server.custom_views.populate_csv(custom_view) + + csv_file = b"".join(custom_view.csv) + self.assertEqual(response, csv_file) + + def test_pdf_height(self) -> None: + self.server.version = "3.23" + self.baseurl = self.server.custom_views.baseurl + with open(CUSTOM_VIEW_POPULATE_PDF, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get( + self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/pdf?vizHeight=1080&vizWidth=1920", + content=response, + ) + custom_view = TSC.CustomViewItem() + custom_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + + req_option = TSC.PDFRequestOptions( + viz_height=1080, + viz_width=1920, + ) + + self.server.custom_views.populate_pdf(custom_view, req_option) + self.assertEqual(response, custom_view.pdf) diff --git a/test/test_data_acceleration_report.py b/test/test_data_acceleration_report.py new file mode 100644 index 000000000..8f9f5a49e --- /dev/null +++ b/test/test_data_acceleration_report.py @@ -0,0 +1,42 @@ +import unittest + +import requests_mock + +import tableauserverclient as TSC +from ._utils import read_xml_asset + +GET_XML = "data_acceleration_report.xml" + + +class DataAccelerationReportTests(unittest.TestCase): + def setUp(self): + self.server = 
TSC.Server("http://test", False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.8" + + self.baseurl = self.server.data_acceleration_report.baseurl + + def test_get(self): + response_xml = read_xml_asset(GET_XML) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + data_acceleration_report = self.server.data_acceleration_report.get() + + self.assertEqual(2, len(data_acceleration_report.comparison_records)) + + self.assertEqual("site-1", data_acceleration_report.comparison_records[0].site) + self.assertEqual("sheet-1", data_acceleration_report.comparison_records[0].sheet_uri) + self.assertEqual("0", data_acceleration_report.comparison_records[0].unaccelerated_session_count) + self.assertEqual("0.0", data_acceleration_report.comparison_records[0].avg_non_accelerated_plt) + self.assertEqual("1", data_acceleration_report.comparison_records[0].accelerated_session_count) + self.assertEqual("0.166", data_acceleration_report.comparison_records[0].avg_accelerated_plt) + + self.assertEqual("site-2", data_acceleration_report.comparison_records[1].site) + self.assertEqual("sheet-2", data_acceleration_report.comparison_records[1].sheet_uri) + self.assertEqual("2", data_acceleration_report.comparison_records[1].unaccelerated_session_count) + self.assertEqual("1.29", data_acceleration_report.comparison_records[1].avg_non_accelerated_plt) + self.assertEqual("3", data_acceleration_report.comparison_records[1].accelerated_session_count) + self.assertEqual("0.372", data_acceleration_report.comparison_records[1].avg_accelerated_plt) diff --git a/test/test_data_freshness_policy.py b/test/test_data_freshness_policy.py new file mode 100644 index 000000000..9591a6380 --- /dev/null +++ b/test/test_data_freshness_policy.py @@ -0,0 +1,189 @@ +import os +import requests_mock +import unittest + +import tableauserverclient as TSC + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +UPDATE_DFP_ALWAYS_LIVE_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_data_freshness_policy.xml") +UPDATE_DFP_SITE_DEFAULT_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_data_freshness_policy2.xml") +UPDATE_DFP_FRESH_EVERY_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_data_freshness_policy3.xml") +UPDATE_DFP_FRESH_AT_DAILY_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_data_freshness_policy4.xml") +UPDATE_DFP_FRESH_AT_WEEKLY_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_data_freshness_policy5.xml") +UPDATE_DFP_FRESH_AT_MONTHLY_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_data_freshness_policy6.xml") + + +class WorkbookTests(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + + # Fake sign in + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.workbooks.baseurl + + def test_update_DFP_always_live(self) -> None: + with open(UPDATE_DFP_ALWAYS_LIVE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.AlwaysLive + ) + 
single_workbook = self.server.workbooks.update(single_workbook) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("AlwaysLive", single_workbook.data_freshness_policy.option) + + def test_update_DFP_site_default(self) -> None: + with open(UPDATE_DFP_SITE_DEFAULT_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.SiteDefault + ) + single_workbook = self.server.workbooks.update(single_workbook) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("SiteDefault", single_workbook.data_freshness_policy.option) + + def test_update_DFP_fresh_every(self) -> None: + with open(UPDATE_DFP_FRESH_EVERY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshEvery + ) + fresh_every_ten_hours = TSC.DataFreshnessPolicyItem.FreshEvery( + TSC.DataFreshnessPolicyItem.FreshEvery.Frequency.Hours, 10 + ) + single_workbook.data_freshness_policy.fresh_every_schedule = fresh_every_ten_hours + single_workbook = self.server.workbooks.update(single_workbook) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("FreshEvery", single_workbook.data_freshness_policy.option) + self.assertEqual("Hours", single_workbook.data_freshness_policy.fresh_every_schedule.frequency) + self.assertEqual(10, single_workbook.data_freshness_policy.fresh_every_schedule.value) + + def test_update_DFP_fresh_every_missing_attributes(self) -> None: + with open(UPDATE_DFP_FRESH_EVERY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshEvery + ) + + self.assertRaises(ValueError, self.server.workbooks.update, single_workbook) + + def test_update_DFP_fresh_at_day(self) -> None: + with open(UPDATE_DFP_FRESH_AT_DAILY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshAt + ) + fresh_at_10pm_daily = TSC.DataFreshnessPolicyItem.FreshAt( + TSC.DataFreshnessPolicyItem.FreshAt.Frequency.Day, "22:00:00", " Asia/Singapore" + ) + single_workbook.data_freshness_policy.fresh_at_schedule 
= fresh_at_10pm_daily + single_workbook = self.server.workbooks.update(single_workbook) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("FreshAt", single_workbook.data_freshness_policy.option) + self.assertEqual("Day", single_workbook.data_freshness_policy.fresh_at_schedule.frequency) + self.assertEqual("22:00:00", single_workbook.data_freshness_policy.fresh_at_schedule.time) + self.assertEqual("Asia/Singapore", single_workbook.data_freshness_policy.fresh_at_schedule.timezone) + + def test_update_DFP_fresh_at_week(self) -> None: + with open(UPDATE_DFP_FRESH_AT_WEEKLY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshAt + ) + fresh_at_10am_mon_wed = TSC.DataFreshnessPolicyItem.FreshAt( + TSC.DataFreshnessPolicyItem.FreshAt.Frequency.Week, + "10:00:00", + "America/Los_Angeles", + ["Monday", "Wednesday"], + ) + single_workbook.data_freshness_policy.fresh_at_schedule = fresh_at_10am_mon_wed + single_workbook = self.server.workbooks.update(single_workbook) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("FreshAt", single_workbook.data_freshness_policy.option) + self.assertEqual("Week", single_workbook.data_freshness_policy.fresh_at_schedule.frequency) + self.assertEqual("10:00:00", single_workbook.data_freshness_policy.fresh_at_schedule.time) + self.assertEqual("Wednesday", single_workbook.data_freshness_policy.fresh_at_schedule.interval_item[0]) + self.assertEqual("Monday", single_workbook.data_freshness_policy.fresh_at_schedule.interval_item[1]) + + def test_update_DFP_fresh_at_month(self) -> None: + with open(UPDATE_DFP_FRESH_AT_MONTHLY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshAt + ) + fresh_at_00am_lastDayOfMonth = TSC.DataFreshnessPolicyItem.FreshAt( + TSC.DataFreshnessPolicyItem.FreshAt.Frequency.Month, "00:00:00", "America/Los_Angeles", ["LastDay"] + ) + single_workbook.data_freshness_policy.fresh_at_schedule = fresh_at_00am_lastDayOfMonth + single_workbook = self.server.workbooks.update(single_workbook) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("FreshAt", single_workbook.data_freshness_policy.option) + self.assertEqual("Month", single_workbook.data_freshness_policy.fresh_at_schedule.frequency) + self.assertEqual("00:00:00", single_workbook.data_freshness_policy.fresh_at_schedule.time) + self.assertEqual("LastDay", single_workbook.data_freshness_policy.fresh_at_schedule.interval_item[0]) + + def test_update_DFP_fresh_at_missing_params(self) -> None: + with open(UPDATE_DFP_FRESH_AT_DAILY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", 
text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshAt + ) + + self.assertRaises(ValueError, self.server.workbooks.update, single_workbook) + + def test_update_DFP_fresh_at_missing_interval(self) -> None: + with open(UPDATE_DFP_FRESH_AT_DAILY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_freshness_policy = TSC.DataFreshnessPolicyItem( + TSC.DataFreshnessPolicyItem.Option.FreshAt + ) + fresh_at_month_no_interval = TSC.DataFreshnessPolicyItem.FreshAt( + TSC.DataFreshnessPolicyItem.FreshAt.Frequency.Month, "00:00:00", "America/Los_Angeles" + ) + single_workbook.data_freshness_policy.fresh_at_schedule = fresh_at_month_no_interval + + self.assertRaises(ValueError, self.server.workbooks.update, single_workbook) diff --git a/test/test_dataalert.py b/test/test_dataalert.py new file mode 100644 index 000000000..6f6f1683c --- /dev/null +++ b/test/test_dataalert.py @@ -0,0 +1,112 @@ +import unittest + +import requests_mock + +import tableauserverclient as TSC +from ._utils import read_xml_asset + +GET_XML = "data_alerts_get.xml" +GET_BY_ID_XML = "data_alerts_get_by_id.xml" +ADD_USER_TO_ALERT = "data_alerts_add_user.xml" +UPDATE_XML = "data_alerts_update.xml" + + +class DataAlertTests(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.2" + + self.baseurl = self.server.data_alerts.baseurl + + def test_get(self) -> None: + response_xml = read_xml_asset(GET_XML) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_alerts, pagination_item = self.server.data_alerts.get() + + self.assertEqual(1, pagination_item.total_available) + self.assertEqual("5ea59b45-e497-5673-8809-bfe213236f75", all_alerts[0].id) + self.assertEqual("Data Alert test", all_alerts[0].subject) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_alerts[0].creatorId) + self.assertEqual("2020-08-10T23:17:06Z", all_alerts[0].createdAt) + self.assertEqual("2020-08-10T23:17:06Z", all_alerts[0].updatedAt) + self.assertEqual("Daily", all_alerts[0].frequency) + self.assertEqual("true", all_alerts[0].public) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_alerts[0].owner_id) + self.assertEqual("Bob", all_alerts[0].owner_name) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", all_alerts[0].view_id) + self.assertEqual("ENDANGERED SAFARI", all_alerts[0].view_name) + self.assertEqual("6d13b0ca-043d-4d42-8c9d-3f3313ea3a00", all_alerts[0].workbook_id) + self.assertEqual("Safari stats", all_alerts[0].workbook_name) + self.assertEqual("5241e88d-d384-4fd7-9c2f-648b5247efc5", all_alerts[0].project_id) + self.assertEqual("Default", all_alerts[0].project_name) + + def test_get_by_id(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_XML) + with requests_mock.mock() as m: + m.get(self.baseurl + "/5ea59b45-e497-5673-8809-bfe213236f75", 
text=response_xml) + alert = self.server.data_alerts.get_by_id("5ea59b45-e497-5673-8809-bfe213236f75") + + self.assertTrue(isinstance(alert.recipients, list)) + self.assertEqual(len(alert.recipients), 1) + self.assertEqual(alert.recipients[0], "dd2239f6-ddf1-4107-981a-4cf94e415794") + + def test_update(self) -> None: + response_xml = read_xml_asset(UPDATE_XML) + with requests_mock.mock() as m: + m.put(self.baseurl + "/5ea59b45-e497-5673-8809-bfe213236f75", text=response_xml) + single_alert = TSC.DataAlertItem() + single_alert._id = "5ea59b45-e497-5673-8809-bfe213236f75" + single_alert._subject = "Data Alert test" + single_alert._frequency = "Daily" + single_alert._public = True + single_alert._owner_id = "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7" + single_alert = self.server.data_alerts.update(single_alert) + + self.assertEqual("5ea59b45-e497-5673-8809-bfe213236f75", single_alert.id) + self.assertEqual("Data Alert test", single_alert.subject) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", single_alert.creatorId) + self.assertEqual("2020-08-10T23:17:06Z", single_alert.createdAt) + self.assertEqual("2020-08-10T23:17:06Z", single_alert.updatedAt) + self.assertEqual("Daily", single_alert.frequency) + self.assertEqual("true", single_alert.public) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", single_alert.owner_id) + self.assertEqual("Bob", single_alert.owner_name) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", single_alert.view_id) + self.assertEqual("ENDANGERED SAFARI", single_alert.view_name) + self.assertEqual("6d13b0ca-043d-4d42-8c9d-3f3313ea3a00", single_alert.workbook_id) + self.assertEqual("Safari stats", single_alert.workbook_name) + self.assertEqual("5241e88d-d384-4fd7-9c2f-648b5247efc5", single_alert.project_id) + self.assertEqual("Default", single_alert.project_name) + + def test_add_user_to_alert(self) -> None: + response_xml = read_xml_asset(ADD_USER_TO_ALERT) + single_alert = TSC.DataAlertItem() + single_alert._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5" + in_user = TSC.UserItem("Bob", TSC.UserItem.Roles.Explorer) + in_user._id = "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7" + + with requests_mock.mock() as m: + m.post(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/users", text=response_xml) + + out_user = self.server.data_alerts.add_user_to_alert(single_alert, in_user) + + self.assertEqual(out_user.id, in_user.id) + self.assertEqual(out_user.name, in_user.name) + self.assertEqual(out_user.site_role, in_user.site_role) + + def test_delete(self) -> None: + with requests_mock.mock() as m: + m.delete(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5", status_code=204) + self.server.data_alerts.delete("0448d2ed-590d-4fa0-b272-a2a8a24555b5") + + def test_delete_user_from_alert(self) -> None: + alert_id = "5ea59b45-e497-5673-8809-bfe213236f75" + user_id = "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7" + with requests_mock.mock() as m: + m.delete(self.baseurl + f"/{alert_id}/users/{user_id}", status_code=204) + self.server.data_alerts.delete_user_from_alert(alert_id, user_id) diff --git a/test/test_database.py b/test/test_database.py new file mode 100644 index 000000000..3fd2c9a67 --- /dev/null +++ b/test/test_database.py @@ -0,0 +1,113 @@ +import unittest + +import requests_mock + +import tableauserverclient as TSC +from ._utils import read_xml_asset, asset + +GET_XML = "database_get.xml" +POPULATE_PERMISSIONS_XML = "database_populate_permissions.xml" +UPDATE_XML = "database_update.xml" +GET_DQW_BY_CONTENT = "dqw_by_content_type.xml" + + +class 
DatabaseTests(unittest.TestCase): + def setUp(self): + self.server = TSC.Server("http://test", False) + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.5" + + self.baseurl = self.server.databases.baseurl + + def test_get(self): + response_xml = read_xml_asset(GET_XML) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_databases, pagination_item = self.server.databases.get() + + self.assertEqual(5, pagination_item.total_available) + self.assertEqual("5ea59b45-e497-4827-8809-bfe213236f75", all_databases[0].id) + self.assertEqual("hyper", all_databases[0].connection_type) + self.assertEqual("hyper_0.hyper", all_databases[0].name) + + self.assertEqual("23591f2c-4802-4d6a-9e28-574a8ea9bc4c", all_databases[1].id) + self.assertEqual("sqlserver", all_databases[1].connection_type) + self.assertEqual("testv1", all_databases[1].name) + self.assertEqual("9324cf6b-ba72-4b8e-b895-ac3f28d2f0e0", all_databases[1].contact_id) + self.assertEqual(True, all_databases[1].certified) + + def test_update(self): + response_xml = read_xml_asset(UPDATE_XML) + with requests_mock.mock() as m: + m.put(self.baseurl + "/23591f2c-4802-4d6a-9e28-574a8ea9bc4c", text=response_xml) + single_database = TSC.DatabaseItem("test") + single_database.contact_id = "9324cf6b-ba72-4b8e-b895-ac3f28d2f0e0" + single_database._id = "23591f2c-4802-4d6a-9e28-574a8ea9bc4c" + single_database.certified = True + single_database.certification_note = "Test" + single_database = self.server.databases.update(single_database) + + self.assertEqual("23591f2c-4802-4d6a-9e28-574a8ea9bc4c", single_database.id) + self.assertEqual("9324cf6b-ba72-4b8e-b895-ac3f28d2f0e0", single_database.contact_id) + self.assertEqual(True, single_database.certified) + self.assertEqual("Test", single_database.certification_note) + + def test_populate_permissions(self): + with open(asset(POPULATE_PERMISSIONS_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/permissions", text=response_xml) + single_database = TSC.DatabaseItem("test") + single_database._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5" + + self.server.databases.populate_permissions(single_database) + permissions = single_database.permissions + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "5e5e1978-71fa-11e4-87dd-7382f5c437af") + self.assertDictEqual( + permissions[0].capabilities, + { + TSC.Permission.Capability.ChangePermissions: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + }, + ) + + self.assertEqual(permissions[1].grantee.tag_name, "user") + self.assertEqual(permissions[1].grantee.id, "7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + self.assertDictEqual( + permissions[1].capabilities, + { + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + }, + ) + + def test_populate_data_quality_warning(self): + with open(asset(GET_DQW_BY_CONTENT), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get( + self.server.databases._data_quality_warnings.baseurl + "/94441d26-9a52-4a42-b0fb-3f94792d1aac", + text=response_xml, + ) + single_database = TSC.DatabaseItem("test") + single_database._id = "94441d26-9a52-4a42-b0fb-3f94792d1aac" + + self.server.databases.populate_dqw(single_database) + dqws = single_database.dqws + first_dqw = 
dqws.pop() + self.assertEqual(first_dqw.id, "c2e0e406-84fb-4f4e-9998-f20dd9306710") + self.assertEqual(first_dqw.warning_type, "WARNING") + self.assertEqual(first_dqw.message, "Hello, World!") + self.assertEqual(first_dqw.owner_id, "eddc8c5f-6af0-40be-b6b0-2c790290a43f") + self.assertEqual(first_dqw.active, True) + self.assertEqual(first_dqw.severe, True) + self.assertEqual(str(first_dqw.created_at), "2021-04-09 18:39:54+00:00") + self.assertEqual(str(first_dqw.updated_at), "2021-04-09 18:39:54+00:00") + + def test_delete(self): + with requests_mock.mock() as m: + m.delete(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5", status_code=204) + self.server.databases.delete("0448d2ed-590d-4fa0-b272-a2a8a24555b5") diff --git a/test/test_datasource.py b/test/test_datasource.py index 1b21c0194..e8a95722b 100644 --- a/test/test_datasource.py +++ b/test/test_datasource.py @@ -1,66 +1,91 @@ -import unittest import os +import tempfile +import unittest +from io import BytesIO +from typing import Optional +from zipfile import ZipFile + import requests_mock -import xml.etree.ElementTree as ET +from defusedxml.ElementTree import fromstring + import tableauserverclient as TSC +from tableauserverclient import ConnectionItem from tableauserverclient.datetime_helpers import format_datetime +from tableauserverclient.server.endpoint.exceptions import InternalServerError +from tableauserverclient.server.endpoint.fileuploads_endpoint import Fileuploads from tableauserverclient.server.request_factory import RequestFactory from ._utils import read_xml_asset, read_xml_assets, asset -ADD_TAGS_XML = 'datasource_add_tags.xml' -GET_XML = 'datasource_get.xml' -GET_EMPTY_XML = 'datasource_get_empty.xml' -GET_BY_ID_XML = 'datasource_get_by_id.xml' -POPULATE_CONNECTIONS_XML = 'datasource_populate_connections.xml' -PUBLISH_XML = 'datasource_publish.xml' -PUBLISH_XML_ASYNC = 'datasource_publish_async.xml' -UPDATE_XML = 'datasource_update.xml' -UPDATE_CONNECTION_XML = 'datasource_connection_update.xml' +ADD_TAGS_XML = "datasource_add_tags.xml" +GET_XML = "datasource_get.xml" +GET_EMPTY_XML = "datasource_get_empty.xml" +GET_BY_ID_XML = "datasource_get_by_id.xml" +POPULATE_CONNECTIONS_XML = "datasource_populate_connections.xml" +POPULATE_PERMISSIONS_XML = "datasource_populate_permissions.xml" +PUBLISH_XML = "datasource_publish.xml" +PUBLISH_XML_ASYNC = "datasource_publish_async.xml" +REFRESH_XML = "datasource_refresh.xml" +REVISION_XML = "datasource_revision.xml" +UPDATE_XML = "datasource_update.xml" +UPDATE_HYPER_DATA_XML = "datasource_data_update.xml" +UPDATE_CONNECTION_XML = "datasource_connection_update.xml" class DatasourceTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) # Fake signin - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.datasources.baseurl - def test_get(self): + def test_get(self) -> None: response_xml = read_xml_asset(GET_XML) with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_datasources, pagination_item = self.server.datasources.get() self.assertEqual(2, pagination_item.total_available) - self.assertEqual('e76a1461-3b1d-4588-bf1b-17551a879ad9', all_datasources[0].id) - self.assertEqual('dataengine', all_datasources[0].datasource_type) - 
self.assertEqual('SampleDS', all_datasources[0].content_url) - self.assertEqual('2016-08-11T21:22:40Z', format_datetime(all_datasources[0].created_at)) - self.assertEqual('2016-08-11T21:34:17Z', format_datetime(all_datasources[0].updated_at)) - self.assertEqual('default', all_datasources[0].project_name) - self.assertEqual('SampleDS', all_datasources[0].name) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_datasources[0].project_id) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_datasources[0].owner_id) - - self.assertEqual('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', all_datasources[1].id) - self.assertEqual('dataengine', all_datasources[1].datasource_type) - self.assertEqual('Sampledatasource', all_datasources[1].content_url) - self.assertEqual('2016-08-04T21:31:55Z', format_datetime(all_datasources[1].created_at)) - self.assertEqual('2016-08-04T21:31:55Z', format_datetime(all_datasources[1].updated_at)) - self.assertEqual('default', all_datasources[1].project_name) - self.assertEqual('Sample datasource', all_datasources[1].name) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_datasources[1].project_id) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_datasources[1].owner_id) - self.assertEqual(set(['world', 'indicators', 'sample']), all_datasources[1].tags) - - def test_get_before_signin(self): + self.assertEqual("e76a1461-3b1d-4588-bf1b-17551a879ad9", all_datasources[0].id) + self.assertEqual("dataengine", all_datasources[0].datasource_type) + self.assertEqual("SampleDsDescription", all_datasources[0].description) + self.assertEqual("SampleDS", all_datasources[0].content_url) + self.assertEqual(4096, all_datasources[0].size) + self.assertEqual("2016-08-11T21:22:40Z", format_datetime(all_datasources[0].created_at)) + self.assertEqual("2016-08-11T21:34:17Z", format_datetime(all_datasources[0].updated_at)) + self.assertEqual("default", all_datasources[0].project_name) + self.assertEqual("SampleDS", all_datasources[0].name) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", all_datasources[0].project_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_datasources[0].owner_id) + self.assertEqual("https://web.com", all_datasources[0].webpage_url) + self.assertFalse(all_datasources[0].encrypt_extracts) + self.assertTrue(all_datasources[0].has_extracts) + self.assertFalse(all_datasources[0].use_remote_query_agent) + + self.assertEqual("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", all_datasources[1].id) + self.assertEqual("dataengine", all_datasources[1].datasource_type) + self.assertEqual("description Sample", all_datasources[1].description) + self.assertEqual("Sampledatasource", all_datasources[1].content_url) + self.assertEqual(10240, all_datasources[1].size) + self.assertEqual("2016-08-04T21:31:55Z", format_datetime(all_datasources[1].created_at)) + self.assertEqual("2016-08-04T21:31:55Z", format_datetime(all_datasources[1].updated_at)) + self.assertEqual("default", all_datasources[1].project_name) + self.assertEqual("Sample datasource", all_datasources[1].name) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", all_datasources[1].project_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_datasources[1].owner_id) + self.assertEqual({"world", "indicators", "sample"}, all_datasources[1].tags) + self.assertEqual("https://page.com", all_datasources[1].webpage_url) + self.assertTrue(all_datasources[1].encrypt_extracts) + self.assertFalse(all_datasources[1].has_extracts) + 
self.assertTrue(all_datasources[1].use_remote_query_agent) + + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.datasources.get) - def test_get_empty(self): + def test_get_empty(self) -> None: response_xml = read_xml_asset(GET_EMPTY_XML) with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) @@ -69,247 +94,623 @@ def test_get_empty(self): self.assertEqual(0, pagination_item.total_available) self.assertEqual([], all_datasources) - def test_get_by_id(self): + def test_get_by_id(self) -> None: response_xml = read_xml_asset(GET_BY_ID_XML) with requests_mock.mock() as m: - m.get(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', text=response_xml) - single_datasource = self.server.datasources.get_by_id('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb') - - self.assertEqual('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', single_datasource.id) - self.assertEqual('dataengine', single_datasource.datasource_type) - self.assertEqual('Sampledatasource', single_datasource.content_url) - self.assertEqual('2016-08-04T21:31:55Z', format_datetime(single_datasource.created_at)) - self.assertEqual('2016-08-04T21:31:55Z', format_datetime(single_datasource.updated_at)) - self.assertEqual('default', single_datasource.project_name) - self.assertEqual('Sample datasource', single_datasource.name) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', single_datasource.project_id) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_datasource.owner_id) - self.assertEqual(set(['world', 'indicators', 'sample']), single_datasource.tags) - - def test_update(self): + m.get(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", text=response_xml) + single_datasource = self.server.datasources.get_by_id("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb") + + self.assertEqual("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", single_datasource.id) + self.assertEqual("dataengine", single_datasource.datasource_type) + self.assertEqual("abc description xyz", single_datasource.description) + self.assertEqual("Sampledatasource", single_datasource.content_url) + self.assertEqual("2016-08-04T21:31:55Z", format_datetime(single_datasource.created_at)) + self.assertEqual("2016-08-04T21:31:55Z", format_datetime(single_datasource.updated_at)) + self.assertEqual("default", single_datasource.project_name) + self.assertEqual("Sample datasource", single_datasource.name) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", single_datasource.project_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", single_datasource.owner_id) + self.assertEqual({"world", "indicators", "sample"}, single_datasource.tags) + self.assertEqual(TSC.DatasourceItem.AskDataEnablement.SiteDefault, single_datasource.ask_data_enablement) + + def test_update(self) -> None: response_xml = read_xml_asset(UPDATE_XML) with requests_mock.mock() as m: - m.put(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', text=response_xml) - single_datasource = TSC.DatasourceItem('test', '1d0304cd-3796-429f-b815-7258370b9b74') - single_datasource.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794' - single_datasource._id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb' + m.put(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", text=response_xml) + single_datasource = TSC.DatasourceItem("1d0304cd-3796-429f-b815-7258370b9b74", "Sample datasource") + single_datasource.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_datasource._content_url = "Sampledatasource" + single_datasource._id = 
"9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" single_datasource.certified = True single_datasource.certification_note = "Warning, here be dragons." - single_datasource = self.server.datasources.update(single_datasource) - - self.assertEqual('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', single_datasource.id) - self.assertEqual('1d0304cd-3796-429f-b815-7258370b9b74', single_datasource.project_id) - self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', single_datasource.owner_id) - self.assertEqual(True, single_datasource.certified) - self.assertEqual("Warning, here be dragons.", single_datasource.certification_note) + updated_datasource = self.server.datasources.update(single_datasource) - def test_update_copy_fields(self): - with open(asset(UPDATE_XML), 'rb') as f: - response_xml = f.read().decode('utf-8') + self.assertEqual(updated_datasource.id, single_datasource.id) + self.assertEqual(updated_datasource.name, single_datasource.name) + self.assertEqual(updated_datasource.content_url, single_datasource.content_url) + self.assertEqual(updated_datasource.project_id, single_datasource.project_id) + self.assertEqual(updated_datasource.owner_id, single_datasource.owner_id) + self.assertEqual(updated_datasource.certified, single_datasource.certified) + self.assertEqual(updated_datasource.certification_note, single_datasource.certification_note) + + def test_update_copy_fields(self) -> None: + with open(asset(UPDATE_XML), "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', text=response_xml) - single_datasource = TSC.DatasourceItem('test', '1d0304cd-3796-429f-b815-7258370b9b74') - single_datasource._id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb' - single_datasource._project_name = 'Tester' + m.put(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", text=response_xml) + single_datasource = TSC.DatasourceItem("1d0304cd-3796-429f-b815-7258370b9b74", "test") + single_datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + single_datasource._project_name = "Tester" updated_datasource = self.server.datasources.update(single_datasource) self.assertEqual(single_datasource.tags, updated_datasource.tags) self.assertEqual(single_datasource._project_name, updated_datasource._project_name) - def test_update_tags(self): + def test_update_tags(self) -> None: add_tags_xml, update_xml = read_xml_assets(ADD_TAGS_XML, UPDATE_XML) with requests_mock.mock() as m: - m.put(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/tags', text=add_tags_xml) - m.delete(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/tags/b', status_code=204) - m.delete(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/tags/d', status_code=204) - m.put(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', text=update_xml) - single_datasource = TSC.DatasourceItem('1d0304cd-3796-429f-b815-7258370b9b74') - single_datasource._id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb' - single_datasource._initial_tags.update(['a', 'b', 'c', 'd']) - single_datasource.tags.update(['a', 'c', 'e']) + m.delete(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/tags/b", status_code=204) + m.delete(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/tags/d", status_code=204) + m.put(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/tags", text=add_tags_xml) + m.put(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", text=update_xml) + single_datasource = TSC.DatasourceItem("1d0304cd-3796-429f-b815-7258370b9b74") + single_datasource._id = 
"9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + single_datasource._initial_tags.update(["a", "b", "c", "d"]) + single_datasource.tags.update(["a", "c", "e"]) updated_datasource = self.server.datasources.update(single_datasource) self.assertEqual(single_datasource.tags, updated_datasource.tags) self.assertEqual(single_datasource._initial_tags, updated_datasource._initial_tags) - def test_populate_connections(self): + def test_populate_connections(self) -> None: response_xml = read_xml_asset(POPULATE_CONNECTIONS_XML) with requests_mock.mock() as m: - m.get(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections', text=response_xml) - single_datasource = TSC.DatasourceItem('test', '1d0304cd-3796-429f-b815-7258370b9b74') - single_datasource.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794' - single_datasource._id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb' + m.get(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections", text=response_xml) + single_datasource = TSC.DatasourceItem("1d0304cd-3796-429f-b815-7258370b9b74", "test") + single_datasource.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" self.server.datasources.populate_connections(single_datasource) - - self.assertEqual('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', single_datasource.id) - - connections = single_datasource.connections - self.assertTrue(connections) - ds1, ds2, ds3 = connections - self.assertEqual(ds1.id, 'be786ae0-d2bf-4a4b-9b34-e2de8d2d4488') - self.assertEqual(ds2.id, '970e24bc-e200-4841-a3e9-66e7d122d77e') - self.assertEqual(ds3.id, '7d85b889-283b-42df-b23e-3c811e402f1f') - - def test_update_connection(self): + self.assertEqual("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", single_datasource.id) + connections: Optional[list[ConnectionItem]] = single_datasource.connections + + self.assertIsNotNone(connections) + assert connections is not None + ds1, ds2 = connections + self.assertEqual("be786ae0-d2bf-4a4b-9b34-e2de8d2d4488", ds1.id) + self.assertEqual("textscan", ds1.connection_type) + self.assertEqual("forty-two.net", ds1.server_address) + self.assertEqual("duo", ds1.username) + self.assertEqual(True, ds1.embed_password) + self.assertEqual(ds1.datasource_id, single_datasource.id) + self.assertEqual(single_datasource.name, ds1.datasource_name) + self.assertEqual("970e24bc-e200-4841-a3e9-66e7d122d77e", ds2.id) + self.assertEqual("sqlserver", ds2.connection_type) + self.assertEqual("database.com", ds2.server_address) + self.assertEqual("heero", ds2.username) + self.assertEqual(False, ds2.embed_password) + self.assertEqual(ds2.datasource_id, single_datasource.id) + self.assertEqual(single_datasource.name, ds2.datasource_name) + + def test_update_connection(self) -> None: populate_xml, response_xml = read_xml_assets(POPULATE_CONNECTIONS_XML, UPDATE_CONNECTION_XML) with requests_mock.mock() as m: - m.get(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections', text=populate_xml) - m.put(self.baseurl + - '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections/be786ae0-d2bf-4a4b-9b34-e2de8d2d4488', - text=response_xml) - single_datasource = TSC.DatasourceItem('test', '1d0304cd-3796-429f-b815-7258370b9b74') - single_datasource.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794' - single_datasource._id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb' + m.get(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections", text=populate_xml) + m.put( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections/be786ae0-d2bf-4a4b-9b34-e2de8d2d4488", + 
text=response_xml, + ) + single_datasource = TSC.DatasourceItem("be786ae0-d2bf-4a4b-9b34-e2de8d2d4488") + single_datasource.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" self.server.datasources.populate_connections(single_datasource) - connection = single_datasource.connections[0] - connection.server_address = 'bar' - connection.server_port = '9876' - connection.username = 'foo' + connection = single_datasource.connections[0] # type: ignore[index] + connection.server_address = "bar" + connection.server_port = "9876" + connection.username = "foo" new_connection = self.server.datasources.update_connection(single_datasource, connection) self.assertEqual(connection.id, new_connection.id) self.assertEqual(connection.connection_type, new_connection.connection_type) - self.assertEquals('bar', new_connection.server_address) - self.assertEquals('9876', new_connection.server_port) - self.assertEqual('foo', new_connection.username) + self.assertEqual("bar", new_connection.server_address) + self.assertEqual("9876", new_connection.server_port) + self.assertEqual("foo", new_connection.username) - def test_publish(self): + def test_populate_permissions(self) -> None: + with open(asset(POPULATE_PERMISSIONS_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/permissions", text=response_xml) + single_datasource = TSC.DatasourceItem("1d0304cd-3796-429f-b815-7258370b9b74", "test") + single_datasource._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5" + + self.server.datasources.populate_permissions(single_datasource) + permissions = single_datasource.permissions + + self.assertEqual(permissions[0].grantee.tag_name, "group") # type: ignore[index] + self.assertEqual(permissions[0].grantee.id, "5e5e1978-71fa-11e4-87dd-7382f5c437af") # type: ignore[index] + self.assertDictEqual( + permissions[0].capabilities, # type: ignore[index] + { + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ChangePermissions: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.Connect: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + }, + ) + + self.assertEqual(permissions[1].grantee.tag_name, "user") # type: ignore[index] + self.assertEqual(permissions[1].grantee.id, "7c37ee24-c4b1-42b6-a154-eaeab7ee330a") # type: ignore[index] + self.assertDictEqual( + permissions[1].capabilities, # type: ignore[index] + { + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + }, + ) + + def test_publish(self) -> None: response_xml = read_xml_asset(PUBLISH_XML) with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - new_datasource = TSC.DatasourceItem('SampleDS', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "SampleDS") publish_mode = self.server.PublishMode.CreateNew - new_datasource = self.server.datasources.publish(new_datasource, - asset('SampleDS.tds'), - mode=publish_mode) - - self.assertEqual('e76a1461-3b1d-4588-bf1b-17551a879ad9', new_datasource.id) - self.assertEqual('SampleDS', new_datasource.name) - self.assertEqual('SampleDS', new_datasource.content_url) - self.assertEqual('dataengine', new_datasource.datasource_type) - self.assertEqual('2016-08-11T21:22:40Z', format_datetime(new_datasource.created_at)) - self.assertEqual('2016-08-17T23:37:08Z', format_datetime(new_datasource.updated_at)) - 
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', new_datasource.project_id) - self.assertEqual('default', new_datasource.project_name) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', new_datasource.owner_id) - - def test_publish_async(self): - response_xml = read_xml_asset(PUBLISH_XML_ASYNC) + new_datasource = self.server.datasources.publish(new_datasource, asset("SampleDS.tds"), mode=publish_mode) + + self.assertEqual("e76a1461-3b1d-4588-bf1b-17551a879ad9", new_datasource.id) + self.assertEqual("SampleDS", new_datasource.name) + self.assertEqual("SampleDS", new_datasource.content_url) + self.assertEqual("dataengine", new_datasource.datasource_type) + self.assertEqual("2016-08-11T21:22:40Z", format_datetime(new_datasource.created_at)) + self.assertEqual("2016-08-17T23:37:08Z", format_datetime(new_datasource.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_datasource.project_id) + self.assertEqual("default", new_datasource.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_datasource.owner_id) + + def test_publish_a_non_packaged_file_object(self) -> None: + response_xml = read_xml_asset(PUBLISH_XML) with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - new_datasource = TSC.DatasourceItem('SampleDS', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "SampleDS") publish_mode = self.server.PublishMode.CreateNew - new_job = self.server.datasources.publish(new_datasource, - asset('SampleDS.tds'), - mode=publish_mode, - as_job=True) + with open(asset("SampleDS.tds"), "rb") as file_object: + new_datasource = self.server.datasources.publish(new_datasource, file_object, mode=publish_mode) + + self.assertEqual("e76a1461-3b1d-4588-bf1b-17551a879ad9", new_datasource.id) + self.assertEqual("SampleDS", new_datasource.name) + self.assertEqual("SampleDS", new_datasource.content_url) + self.assertEqual("dataengine", new_datasource.datasource_type) + self.assertEqual("2016-08-11T21:22:40Z", format_datetime(new_datasource.created_at)) + self.assertEqual("2016-08-17T23:37:08Z", format_datetime(new_datasource.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_datasource.project_id) + self.assertEqual("default", new_datasource.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_datasource.owner_id) + + def test_publish_a_packaged_file_object(self) -> None: + response_xml = read_xml_asset(PUBLISH_XML) + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "SampleDS") + publish_mode = self.server.PublishMode.CreateNew + + # Create a dummy tdsx file in memory + with BytesIO() as zip_archive: + with ZipFile(zip_archive, "w") as zf: + zf.write(asset("SampleDS.tds")) + + zip_archive.seek(0) + + new_datasource = self.server.datasources.publish(new_datasource, zip_archive, mode=publish_mode) + + self.assertEqual("e76a1461-3b1d-4588-bf1b-17551a879ad9", new_datasource.id) + self.assertEqual("SampleDS", new_datasource.name) + self.assertEqual("SampleDS", new_datasource.content_url) + self.assertEqual("dataengine", new_datasource.datasource_type) + self.assertEqual("2016-08-11T21:22:40Z", format_datetime(new_datasource.created_at)) + self.assertEqual("2016-08-17T23:37:08Z", format_datetime(new_datasource.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_datasource.project_id) + 
self.assertEqual("default", new_datasource.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_datasource.owner_id) + + def test_publish_async(self) -> None: + self.server.version = "3.0" + baseurl = self.server.datasources.baseurl + response_xml = read_xml_asset(PUBLISH_XML_ASYNC) + with requests_mock.mock() as m: + m.post(baseurl, text=response_xml) + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "SampleDS") + publish_mode = self.server.PublishMode.CreateNew + + new_job = self.server.datasources.publish( + new_datasource, asset("SampleDS.tds"), mode=publish_mode, as_job=True + ) + + self.assertEqual("9a373058-af5f-4f83-8662-98b3e0228a73", new_job.id) + self.assertEqual("PublishDatasource", new_job.type) + self.assertEqual("0", new_job.progress) + self.assertEqual("2018-06-30T00:54:54Z", format_datetime(new_job.created_at)) + self.assertEqual(1, new_job.finish_code) + + def test_publish_unnamed_file_object(self) -> None: + new_datasource = TSC.DatasourceItem("test") + publish_mode = self.server.PublishMode.CreateNew + + with open(asset("SampleDS.tds"), "rb") as file_object: + self.assertRaises(ValueError, self.server.datasources.publish, new_datasource, file_object, publish_mode) + + def test_refresh_id(self) -> None: + self.server.version = "2.8" + self.baseurl = self.server.datasources.baseurl + response_xml = read_xml_asset(REFRESH_XML) + with requests_mock.mock() as m: + m.post(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/refresh", status_code=202, text=response_xml) + new_job = self.server.datasources.refresh("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb") + + self.assertEqual("7c3d599e-949f-44c3-94a1-f30ba85757e4", new_job.id) + self.assertEqual("RefreshExtract", new_job.type) + self.assertEqual(None, new_job.progress) + self.assertEqual("2020-03-05T22:05:32Z", format_datetime(new_job.created_at)) + self.assertEqual(-1, new_job.finish_code) + + def test_refresh_object(self) -> None: + self.server.version = "2.8" + self.baseurl = self.server.datasources.baseurl + datasource = TSC.DatasourceItem("") + datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + response_xml = read_xml_asset(REFRESH_XML) + with requests_mock.mock() as m: + m.post(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/refresh", status_code=202, text=response_xml) + new_job = self.server.datasources.refresh(datasource) + + # We only check the `id`; remaining fields are already tested in `test_refresh_id` + self.assertEqual("7c3d599e-949f-44c3-94a1-f30ba85757e4", new_job.id) + + def test_update_hyper_data_datasource_object(self) -> None: + """Calling `update_hyper_data` with a `DatasourceItem` should update that datasource""" + self.server.version = "3.13" + self.baseurl = self.server.datasources.baseurl + + datasource = TSC.DatasourceItem("") + datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML) + with requests_mock.mock() as m: + m.patch( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/data", + status_code=202, + headers={"requestid": "test_id"}, + text=response_xml, + ) + new_job = self.server.datasources.update_hyper_data(datasource, request_id="test_id", actions=[]) + + self.assertEqual("5c0ba560-c959-424e-b08a-f32ef0bfb737", new_job.id) + self.assertEqual("UpdateUploadedFile", new_job.type) + self.assertEqual(None, new_job.progress) + self.assertEqual("2021-09-18T09:40:12Z", format_datetime(new_job.created_at)) + self.assertEqual(-1, new_job.finish_code) + + def 
test_update_hyper_data_connection_object(self) -> None: + """Calling `update_hyper_data` with a `ConnectionItem` should update that connection""" + self.server.version = "3.13" + self.baseurl = self.server.datasources.baseurl + + connection = TSC.ConnectionItem() + connection._datasource_id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + connection._id = "7ecaccd8-39b0-4875-a77d-094f6e930019" + response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML) + with requests_mock.mock() as m: + m.patch( + self.baseurl + + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections/7ecaccd8-39b0-4875-a77d-094f6e930019/data", + status_code=202, + headers={"requestid": "test_id"}, + text=response_xml, + ) + new_job = self.server.datasources.update_hyper_data(connection, request_id="test_id", actions=[]) + + # We only check the `id`; remaining fields are already tested in `test_update_hyper_data_datasource_object` + self.assertEqual("5c0ba560-c959-424e-b08a-f32ef0bfb737", new_job.id) + + def test_update_hyper_data_datasource_string(self) -> None: + """For convenience, calling `update_hyper_data` with a `str` should update the datasource with the corresponding UUID""" + self.server.version = "3.13" + self.baseurl = self.server.datasources.baseurl - self.assertEqual('9a373058-af5f-4f83-8662-98b3e0228a73', new_job.id) - self.assertEqual('PublishDatasource', new_job.type) - self.assertEqual('0', new_job.progress) - self.assertEqual('2018-06-30T00:54:54Z', format_datetime(new_job.created_at)) - self.assertEqual('1', new_job.finish_code) + datasource_id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML) + with requests_mock.mock() as m: + m.patch( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/data", + status_code=202, + headers={"requestid": "test_id"}, + text=response_xml, + ) + new_job = self.server.datasources.update_hyper_data(datasource_id, request_id="test_id", actions=[]) + + # We only check the `id`; remaining fields are already tested in `test_update_hyper_data_datasource_object` + self.assertEqual("5c0ba560-c959-424e-b08a-f32ef0bfb737", new_job.id) + + def test_update_hyper_data_datasource_payload_file(self) -> None: + """If `payload` is present, we upload it and associate the job with it""" + self.server.version = "3.13" + self.baseurl = self.server.datasources.baseurl - def test_delete(self): + datasource_id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + mock_upload_id = "10051:c3e56879876842d4b3600f20c1f79876-0:0" + response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML) + with requests_mock.mock() as rm, unittest.mock.patch.object(Fileuploads, "upload", return_value=mock_upload_id): + rm.patch( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/data?uploadSessionId=" + mock_upload_id, + status_code=202, + headers={"requestid": "test_id"}, + text=response_xml, + ) + new_job = self.server.datasources.update_hyper_data( + datasource_id, request_id="test_id", actions=[], payload=asset("World Indicators.hyper") + ) + + # We only check the `id`; remaining fields are already tested in `test_update_hyper_data_datasource_object` + self.assertEqual("5c0ba560-c959-424e-b08a-f32ef0bfb737", new_job.id) + + def test_update_hyper_data_datasource_invalid_payload_file(self) -> None: + """If `payload` points to a non-existing file, we report an error""" + self.server.version = "3.13" + self.baseurl = self.server.datasources.baseurl + datasource_id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + with self.assertRaises(IOError) as cm: + self.server.datasources.update_hyper_data( + 
datasource_id, request_id="test_id", actions=[], payload="no/such/file.missing" + ) + exception = cm.exception + self.assertEqual(str(exception), "File path does not lead to an existing file.") + + def test_delete(self) -> None: with requests_mock.mock() as m: - m.delete(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', status_code=204) - self.server.datasources.delete('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb') + m.delete(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", status_code=204) + self.server.datasources.delete("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb") - def test_download(self): + def test_download(self) -> None: with requests_mock.mock() as m: - m.get(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content', - headers={'Content-Disposition': 'name="tableau_datasource"; filename="Sample datasource.tds"'}) - file_path = self.server.datasources.download('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb') + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content", + headers={"Content-Disposition": 'name="tableau_datasource"; filename="Sample datasource.tds"'}, + ) + file_path = self.server.datasources.download("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb") self.assertTrue(os.path.exists(file_path)) os.remove(file_path) - def test_download_sanitizes_name(self): + def test_download_object(self) -> None: + with BytesIO() as file_object: + with requests_mock.mock() as m: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content", + headers={"Content-Disposition": 'name="tableau_datasource"; filename="Sample datasource.tds"'}, + ) + file_path = self.server.datasources.download( + "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", filepath=file_object + ) + self.assertTrue(isinstance(file_path, BytesIO)) + + def test_download_sanitizes_name(self) -> None: filename = "Name,With,Commas.tds" - disposition = 'name="tableau_workbook"; filename="{}"'.format(filename) + disposition = f'name="tableau_workbook"; filename="{filename}"' with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content', - headers={'Content-Disposition': disposition}) - file_path = self.server.datasources.download('1f951daf-4061-451a-9df1-69a8062664f2') + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/content", + headers={"Content-Disposition": disposition}, + ) + file_path = self.server.datasources.download("1f951daf-4061-451a-9df1-69a8062664f2") self.assertEqual(os.path.basename(file_path), "NameWithCommas.tds") self.assertTrue(os.path.exists(file_path)) os.remove(file_path) - def test_download_extract_only(self): + def test_download_extract_only(self) -> None: # Pretend we're 2.5 for 'extract_only' self.server.version = "2.5" self.baseurl = self.server.datasources.baseurl with requests_mock.mock() as m: - m.get(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content?includeExtract=False', - headers={'Content-Disposition': 'name="tableau_datasource"; filename="Sample datasource.tds"'}, - complete_qs=True) - file_path = self.server.datasources.download('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', include_extract=False) + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content?includeExtract=False", + headers={"Content-Disposition": 'name="tableau_datasource"; filename="Sample datasource.tds"'}, + complete_qs=True, + ) + file_path = self.server.datasources.download("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", include_extract=False) self.assertTrue(os.path.exists(file_path)) os.remove(file_path) - def test_update_missing_id(self): - 
single_datasource = TSC.DatasourceItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + def test_update_missing_id(self) -> None: + single_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") self.assertRaises(TSC.MissingRequiredFieldError, self.server.datasources.update, single_datasource) - def test_publish_missing_path(self): - new_datasource = TSC.DatasourceItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - self.assertRaises(IOError, self.server.datasources.publish, new_datasource, - '', self.server.PublishMode.CreateNew) - - def test_publish_missing_mode(self): - new_datasource = TSC.DatasourceItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - self.assertRaises(ValueError, self.server.datasources.publish, new_datasource, - asset('SampleDS.tds'), None) - - def test_publish_invalid_file_type(self): - new_datasource = TSC.DatasourceItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - self.assertRaises(ValueError, self.server.datasources.publish, new_datasource, - asset('SampleWB.twbx'), self.server.PublishMode.Append) - - def test_publish_multi_connection(self): - new_datasource = TSC.DatasourceItem(name='Sample', project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + def test_publish_missing_path(self) -> None: + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") + self.assertRaises( + IOError, self.server.datasources.publish, new_datasource, "", self.server.PublishMode.CreateNew + ) + + def test_publish_missing_mode(self) -> None: + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") + self.assertRaises(ValueError, self.server.datasources.publish, new_datasource, asset("SampleDS.tds"), None) + + def test_publish_invalid_file_type(self) -> None: + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") + self.assertRaises( + ValueError, + self.server.datasources.publish, + new_datasource, + asset("SampleWB.twbx"), + self.server.PublishMode.Append, + ) + + def test_publish_hyper_file_object_raises_exception(self) -> None: + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") + with open(asset("World Indicators.hyper"), "rb") as file_object: + self.assertRaises( + ValueError, self.server.datasources.publish, new_datasource, file_object, self.server.PublishMode.Append + ) + + def test_publish_tde_file_object_raises_exception(self) -> None: + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") + tds_asset = asset(os.path.join("Data", "Tableau Samples", "World Indicators.tde")) + with open(tds_asset, "rb") as file_object: + self.assertRaises( + ValueError, self.server.datasources.publish, new_datasource, file_object, self.server.PublishMode.Append + ) + + def test_publish_file_object_of_unknown_type_raises_exception(self) -> None: + new_datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "test") + + with BytesIO() as file_object: + file_object.write(bytes.fromhex("89504E470D0A1A0A")) + file_object.seek(0) + self.assertRaises( + ValueError, self.server.datasources.publish, new_datasource, file_object, self.server.PublishMode.Append + ) + + def test_publish_multi_connection(self) -> None: + new_datasource = TSC.DatasourceItem(name="Sample", project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") connection1 = TSC.ConnectionItem() - connection1.server_address = 'mysql.test.com' - connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True) + 
connection1.server_address = "mysql.test.com" + connection1.connection_credentials = TSC.ConnectionCredentials("test", "secret", True) connection2 = TSC.ConnectionItem() - connection2.server_address = 'pgsql.test.com' - connection2.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True) + connection2.server_address = "pgsql.test.com" + connection2.connection_credentials = TSC.ConnectionCredentials("test", "secret", True) response = RequestFactory.Datasource._generate_xml(new_datasource, connections=[connection1, connection2]) # Can't use ConnectionItem parser due to xml namespace problems - connection_results = ET.fromstring(response).findall('.//connection') + connection_results = fromstring(response).findall(".//connection") - self.assertEqual(connection_results[0].get('serverAddress', None), 'mysql.test.com') - self.assertEqual(connection_results[0].find('connectionCredentials').get('name', None), 'test') - self.assertEqual(connection_results[1].get('serverAddress', None), 'pgsql.test.com') - self.assertEqual(connection_results[1].find('connectionCredentials').get('password', None), 'secret') + self.assertEqual(connection_results[0].get("serverAddress", None), "mysql.test.com") + self.assertEqual(connection_results[0].find("connectionCredentials").get("name", None), "test") # type: ignore[union-attr] + self.assertEqual(connection_results[1].get("serverAddress", None), "pgsql.test.com") + self.assertEqual(connection_results[1].find("connectionCredentials").get("password", None), "secret") # type: ignore[union-attr] - def test_publish_single_connection(self): - new_datasource = TSC.DatasourceItem(name='Sample', project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - connection_creds = TSC.ConnectionCredentials('test', 'secret', True) + def test_publish_single_connection(self) -> None: + new_datasource = TSC.DatasourceItem(name="Sample", project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + connection_creds = TSC.ConnectionCredentials("test", "secret", True) response = RequestFactory.Datasource._generate_xml(new_datasource, connection_credentials=connection_creds) # Can't use ConnectionItem parser due to xml namespace problems - credentials = ET.fromstring(response).findall('.//connectionCredentials') + credentials = fromstring(response).findall(".//connectionCredentials") self.assertEqual(len(credentials), 1) - self.assertEqual(credentials[0].get('name', None), 'test') - self.assertEqual(credentials[0].get('password', None), 'secret') - self.assertEqual(credentials[0].get('embed', None), 'true') + self.assertEqual(credentials[0].get("name", None), "test") + self.assertEqual(credentials[0].get("password", None), "secret") + self.assertEqual(credentials[0].get("embed", None), "true") - def test_credentials_and_multi_connect_raises_exception(self): - new_datasource = TSC.DatasourceItem(name='Sample', project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + def test_credentials_and_multi_connect_raises_exception(self) -> None: + new_datasource = TSC.DatasourceItem(name="Sample", project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") - connection_creds = TSC.ConnectionCredentials('test', 'secret', True) + connection_creds = TSC.ConnectionCredentials("test", "secret", True) connection1 = TSC.ConnectionItem() - connection1.server_address = 'mysql.test.com' - connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True) + connection1.server_address = "mysql.test.com" + connection1.connection_credentials = TSC.ConnectionCredentials("test", "secret", True) with 
self.assertRaises(RuntimeError): - response = RequestFactory.Datasource._generate_xml(new_datasource, - connection_credentials=connection_creds, - connections=[connection1]) + response = RequestFactory.Datasource._generate_xml( + new_datasource, connection_credentials=connection_creds, connections=[connection1] + ) + + def test_synchronous_publish_timeout_error(self) -> None: + with requests_mock.mock() as m: + m.register_uri("POST", self.baseurl, status_code=504) + + new_datasource = TSC.DatasourceItem(project_id="") + publish_mode = self.server.PublishMode.CreateNew + # http://test/api/2.4/sites/dad65087-b08b-4603-af4e-2887b8aafc67/datasources?datasourceType=tds + self.assertRaisesRegex( + InternalServerError, + "Please use asynchronous publishing to avoid timeouts.", + self.server.datasources.publish, + new_datasource, + asset("SampleDS.tds"), + publish_mode, + ) + + def test_delete_extracts(self) -> None: + self.server.version = "3.10" + self.baseurl = self.server.datasources.baseurl + with requests_mock.mock() as m: + m.post(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/deleteExtract", status_code=200) + self.server.datasources.delete_extract("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_create_extracts(self) -> None: + self.server.version = "3.10" + self.baseurl = self.server.datasources.baseurl + + response_xml = read_xml_asset(PUBLISH_XML_ASYNC) + with requests_mock.mock() as m: + m.post( + self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/createExtract", status_code=200, text=response_xml + ) + self.server.datasources.create_extract("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_create_extracts_encrypted(self) -> None: + self.server.version = "3.10" + self.baseurl = self.server.datasources.baseurl + + response_xml = read_xml_asset(PUBLISH_XML_ASYNC) + with requests_mock.mock() as m: + m.post( + self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/createExtract", status_code=200, text=response_xml + ) + self.server.datasources.create_extract("3cc6cd06-89ce-4fdc-b935-5294135d6d42", True) + + def test_revisions(self) -> None: + datasource = TSC.DatasourceItem("project", "test") + datasource._id = "06b944d2-959d-4604-9305-12323c95e70e" + + response_xml = read_xml_asset(REVISION_XML) + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{datasource.id}/revisions", text=response_xml) + self.server.datasources.populate_revisions(datasource) + revisions = datasource.revisions + + self.assertEqual(len(revisions), 3) + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(revisions[0].created_at)) + self.assertEqual("2016-07-27T20:34:56Z", format_datetime(revisions[1].created_at)) + self.assertEqual("2016-07-28T20:34:56Z", format_datetime(revisions[2].created_at)) + + self.assertEqual(False, revisions[0].deleted) + self.assertEqual(False, revisions[0].current) + self.assertEqual(False, revisions[1].deleted) + self.assertEqual(False, revisions[1].current) + self.assertEqual(False, revisions[2].deleted) + self.assertEqual(True, revisions[2].current) + + self.assertEqual("Cassie", revisions[0].user_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", revisions[0].user_id) + self.assertIsNone(revisions[1].user_name) + self.assertIsNone(revisions[1].user_id) + self.assertEqual("Cassie", revisions[2].user_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", revisions[2].user_id) + + def test_delete_revision(self) -> None: + datasource = TSC.DatasourceItem("project", "test") + datasource._id = "06b944d2-959d-4604-9305-12323c95e70e" + + with 
requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{datasource.id}/revisions/3") + self.server.datasources.delete_revision(datasource.id, "3") + + def test_download_revision(self) -> None: + with requests_mock.mock() as m, tempfile.TemporaryDirectory() as td: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/revisions/3/content", + headers={"Content-Disposition": 'name="tableau_datasource"; filename="Sample datasource.tds"'}, + ) + file_path = self.server.datasources.download_revision("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", "3", td) + self.assertTrue(os.path.exists(file_path)) + + def test_bad_download_response(self) -> None: + with requests_mock.mock() as m, tempfile.TemporaryDirectory() as td: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content", + headers={ + "Content-Disposition": '''name="tableau_datasource"; filename*=UTF-8''"Sample datasource.tds"''' + }, + ) + file_path = self.server.datasources.download("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", td) + self.assertTrue(os.path.exists(file_path)) diff --git a/test/test_datasource_model.py b/test/test_datasource_model.py index 600587801..655284194 100644 --- a/test/test_datasource_model.py +++ b/test/test_datasource_model.py @@ -1,11 +1,18 @@ -import datetime import unittest import tableauserverclient as TSC class DatasourceModelTests(unittest.TestCase): - def test_invalid_project_id(self): - self.assertRaises(ValueError, TSC.DatasourceItem, None) + def test_nullable_project_id(self): + datasource = TSC.DatasourceItem(name="10") + self.assertEqual(datasource.project_id, None) + + def test_require_boolean_flag_bridge_fail(self): datasource = TSC.DatasourceItem("10") with self.assertRaises(ValueError): - datasource.project_id = None + datasource.use_remote_query_agent = "yes" + + def test_require_boolean_flag_bridge_ok(self): + datasource = TSC.DatasourceItem("10") + datasource.use_remote_query_agent = True + self.assertEqual(datasource.use_remote_query_agent, True) diff --git a/test/test_dqw.py b/test/test_dqw.py new file mode 100644 index 000000000..6d1219f66 --- /dev/null +++ b/test/test_dqw.py @@ -0,0 +1,11 @@ +import unittest +import tableauserverclient as TSC + + +class DQWTests(unittest.TestCase): + def test_existence(self): + dqw: TSC.DQWItem = TSC.DQWItem() + dqw.message = "message" + dqw.warning_type = TSC.DQWItem.WarningType.STALE + dqw.active = True + dqw.severe = True diff --git a/test/test_endpoint.py b/test/test_endpoint.py new file mode 100644 index 000000000..ff1ef0f72 --- /dev/null +++ b/test/test_endpoint.py @@ -0,0 +1,83 @@ +from pathlib import Path +import pytest +import requests +import unittest + +import tableauserverclient as TSC + +import requests_mock + +ASSETS = Path(__file__).parent / "assets" + + +class TestEndpoint(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test/", use_server_version=False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + return super().setUp() + + def test_fallback_request_logic(self) -> None: + url = "http://test/" + endpoint = TSC.server.Endpoint(self.server) + with requests_mock.mock() as m: + m.get(url) + response = endpoint.get_request(url=url) + self.assertIsNotNone(response) + + def test_user_friendly_request_returns(self) -> None: + url = "http://test/" + endpoint = TSC.server.Endpoint(self.server) + with requests_mock.mock() as m: + m.get(url) + response = endpoint.send_request_while_show_progress_threaded( + 
endpoint.parent_srv.session.get, url=url, request_timeout=2 + ) + self.assertIsNotNone(response) + + def test_blocking_request_raises_request_error(self) -> None: + with pytest.raises(requests.exceptions.ConnectionError): + url = "http://test/" + endpoint = TSC.server.Endpoint(self.server) + response = endpoint._blocking_request(endpoint.parent_srv.session.get, url=url) + self.assertIsNotNone(response) + + def test_get_request_stream(self) -> None: + url = "http://test/" + endpoint = TSC.server.Endpoint(self.server) + with requests_mock.mock() as m: + m.get(url, headers={"Content-Type": "application/octet-stream"}) + response = endpoint.get_request(url, parameters={"stream": True}) + + self.assertFalse(response._content_consumed) + + def test_binary_log_truncated(self): + class FakeResponse: + headers = {"Content-Type": "application/octet-stream"} + content = b"\x1337" * 1000 + status_code = 200 + + endpoint = TSC.server.Endpoint(self.server) + server_response = FakeResponse() + log = endpoint.log_response_safely(server_response) + self.assertTrue(log.find("[Truncated File Contents]") > 0, log) + + def test_set_user_agent_from_options_headers(self): + params = {"User-Agent": "1", "headers": {"User-Agent": "2"}} + result = TSC.server.Endpoint.set_user_agent(params) + # it should use the value under 'headers' if more than one is given + print(result) + print(result["headers"]["User-Agent"]) + self.assertTrue(result["headers"]["User-Agent"] == "2") + + def test_set_user_agent_from_options(self): + params = {"headers": {"User-Agent": "2"}} + result = TSC.server.Endpoint.set_user_agent(params) + self.assertTrue(result["headers"]["User-Agent"] == "2") + + def test_set_user_agent_when_blank(self): + params = {"headers": {}} + result = TSC.server.Endpoint.set_user_agent(params) + self.assertTrue(result["headers"]["User-Agent"].startswith("Tableau Server Client")) diff --git a/test/test_exponential_backoff.py b/test/test_exponential_backoff.py new file mode 100644 index 000000000..a07eb5d3a --- /dev/null +++ b/test/test_exponential_backoff.py @@ -0,0 +1,60 @@ +import unittest + +from tableauserverclient.exponential_backoff import ExponentialBackoffTimer +from ._utils import mocked_time + + +class ExponentialBackoffTests(unittest.TestCase): + def test_exponential(self): + with mocked_time() as mock_time: + exponentialBackoff = ExponentialBackoffTimer() + # The creation of our mock shouldn't sleep + self.assertAlmostEqual(mock_time(), 0) + # The first sleep sleeps for a rather short time, the following sleeps become longer + exponentialBackoff.sleep() + self.assertAlmostEqual(mock_time(), 0.5) + exponentialBackoff.sleep() + self.assertAlmostEqual(mock_time(), 1.2) + exponentialBackoff.sleep() + self.assertAlmostEqual(mock_time(), 2.18) + exponentialBackoff.sleep() + self.assertAlmostEqual(mock_time(), 3.552) + exponentialBackoff.sleep() + self.assertAlmostEqual(mock_time(), 5.4728) + + def test_exponential_saturation(self): + with mocked_time() as mock_time: + exponentialBackoff = ExponentialBackoffTimer() + for _ in range(99): + exponentialBackoff.sleep() + # We don't increase the sleep time above 30 seconds. 
+        # Otherwise, the exponential sleep time could easily +        # reach minutes or even hours between polls +        for _ in range(5): +            s = mock_time() +            exponentialBackoff.sleep() +            slept = mock_time() - s +            self.assertAlmostEqual(slept, 30) + +    def test_timeout(self): +        with mocked_time() as mock_time: +            exponentialBackoff = ExponentialBackoffTimer(timeout=4.5) +            for _ in range(4): +                exponentialBackoff.sleep() +            self.assertAlmostEqual(mock_time(), 3.552) +            # Usually, the following sleep would sleep until 5.5, but due to +            # the timeout we wait less; this way we make sure to honor the timeout +            # as closely as possible +            exponentialBackoff.sleep() +            self.assertAlmostEqual(mock_time(), 4.5) +            # The next call to `sleep` will raise a TimeoutError +            with self.assertRaises(TimeoutError): +                exponentialBackoff.sleep() + +    def test_timeout_zero(self): +        with mocked_time() as mock_time: +            # The construction of the timer doesn't throw, yet +            exponentialBackoff = ExponentialBackoffTimer(timeout=0) +            # But the first `sleep` immediately throws +            with self.assertRaises(TimeoutError): +                exponentialBackoff.sleep() diff --git a/test/test_favorites.py b/test/test_favorites.py new file mode 100644 index 000000000..87332d70f --- /dev/null +++ b/test/test_favorites.py @@ -0,0 +1,119 @@ +import unittest + +import requests_mock + +import tableauserverclient as TSC +from ._utils import read_xml_asset + +GET_FAVORITES_XML = "favorites_get.xml" +ADD_FAVORITE_WORKBOOK_XML = "favorites_add_workbook.xml" +ADD_FAVORITE_VIEW_XML = "favorites_add_view.xml" +ADD_FAVORITE_DATASOURCE_XML = "favorites_add_datasource.xml" +ADD_FAVORITE_PROJECT_XML = "favorites_add_project.xml" + + +class FavoritesTests(unittest.TestCase): +    def setUp(self): +        self.server = TSC.Server("http://test", False) +        self.server.version = "2.5" + +        # Fake signin +        self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" +        self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + +        self.baseurl = self.server.favorites.baseurl +        self.user = TSC.UserItem("alice", TSC.UserItem.Roles.Viewer) +        self.user._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + +    def test_get(self) -> None: +        response_xml = read_xml_asset(GET_FAVORITES_XML) +        with requests_mock.mock() as m: +            m.get(f"{self.baseurl}/{self.user.id}", text=response_xml) +            self.server.favorites.get(self.user) +        self.assertIsNotNone(self.user._favorites) +        self.assertEqual(len(self.user.favorites["workbooks"]), 1) +        self.assertEqual(len(self.user.favorites["views"]), 1) +        self.assertEqual(len(self.user.favorites["projects"]), 1) +        self.assertEqual(len(self.user.favorites["datasources"]), 1) + +        workbook = self.user.favorites["workbooks"][0] +        print("favorited: ") +        print(workbook) +        view = self.user.favorites["views"][0] +        datasource = self.user.favorites["datasources"][0] +        project = self.user.favorites["projects"][0] + +        self.assertEqual(workbook.id, "6d13b0ca-043d-4d42-8c9d-3f3313ea3a00") +        self.assertEqual(view.id, "d79634e1-6063-4ec9-95ff-50acbf609ff5") +        self.assertEqual(datasource.id, "e76a1461-3b1d-4588-bf1b-17551a879ad9") +        self.assertEqual(project.id, "1d0304cd-3796-429f-b815-7258370b9b74") + +    def test_add_favorite_workbook(self) -> None: +        response_xml = read_xml_asset(ADD_FAVORITE_WORKBOOK_XML) +        workbook = TSC.WorkbookItem("") +        workbook._id = "6d13b0ca-043d-4d42-8c9d-3f3313ea3a00" +        workbook.name = "Superstore" +        with requests_mock.mock() as m: +            m.put(f"{self.baseurl}/{self.user.id}", text=response_xml) +            self.server.favorites.add_favorite_workbook(self.user, workbook) + +    def 
test_add_favorite_view(self) -> None: + response_xml = read_xml_asset(ADD_FAVORITE_VIEW_XML) + view = TSC.ViewItem() + view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + view._name = "ENDANGERED SAFARI" + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{self.user.id}", text=response_xml) + self.server.favorites.add_favorite_view(self.user, view) + + def test_add_favorite_datasource(self) -> None: + response_xml = read_xml_asset(ADD_FAVORITE_DATASOURCE_XML) + datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + datasource._id = "e76a1461-3b1d-4588-bf1b-17551a879ad9" + datasource.name = "SampleDS" + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{self.user.id}", text=response_xml) + self.server.favorites.add_favorite_datasource(self.user, datasource) + + def test_add_favorite_project(self) -> None: + self.server.version = "3.1" + baseurl = self.server.favorites.baseurl + response_xml = read_xml_asset(ADD_FAVORITE_PROJECT_XML) + project = TSC.ProjectItem("Tableau") + project._id = "1d0304cd-3796-429f-b815-7258370b9b74" + with requests_mock.mock() as m: + m.put(f"{baseurl}/{self.user.id}", text=response_xml) + self.server.favorites.add_favorite_project(self.user, project) + + def test_delete_favorite_workbook(self) -> None: + workbook = TSC.WorkbookItem("") + workbook._id = "6d13b0ca-043d-4d42-8c9d-3f3313ea3a00" + workbook.name = "Superstore" + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{self.user.id}/workbooks/{workbook.id}") + self.server.favorites.delete_favorite_workbook(self.user, workbook) + + def test_delete_favorite_view(self) -> None: + view = TSC.ViewItem() + view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + view._name = "ENDANGERED SAFARI" + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{self.user.id}/views/{view.id}") + self.server.favorites.delete_favorite_view(self.user, view) + + def test_delete_favorite_datasource(self) -> None: + datasource = TSC.DatasourceItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + datasource._id = "e76a1461-3b1d-4588-bf1b-17551a879ad9" + datasource.name = "SampleDS" + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{self.user.id}/datasources/{datasource.id}") + self.server.favorites.delete_favorite_datasource(self.user, datasource) + + def test_delete_favorite_project(self) -> None: + self.server.version = "3.1" + baseurl = self.server.favorites.baseurl + project = TSC.ProjectItem("Tableau") + project._id = "1d0304cd-3796-429f-b815-7258370b9b74" + with requests_mock.mock() as m: + m.delete(f"{baseurl}/{self.user.id}/projects/{project.id}") + self.server.favorites.delete_favorite_project(self.user, project) diff --git a/test/test_filesys_helpers.py b/test/test_filesys_helpers.py new file mode 100644 index 000000000..0f3234d5d --- /dev/null +++ b/test/test_filesys_helpers.py @@ -0,0 +1,99 @@ +import os +import unittest +from io import BytesIO +from xml.etree import ElementTree as ET +from zipfile import ZipFile + +from tableauserverclient.filesys_helpers import get_file_object_size, get_file_type +from ._utils import asset, TEST_ASSET_DIR + + +class FilesysTests(unittest.TestCase): + def test_get_file_size_returns_correct_size(self): + target_size = 1000 # bytes + + with BytesIO() as f: + f.seek(target_size - 1) + f.write(b"\0") + file_size = get_file_object_size(f) + + self.assertEqual(file_size, target_size) + + def test_get_file_size_returns_zero_for_empty_file(self): + with BytesIO() as f: + file_size = get_file_object_size(f) + + self.assertEqual(file_size, 0) + + def 
test_get_file_size_coincides_with_built_in_method(self): + asset_path = asset("SampleWB.twbx") + target_size = os.path.getsize(asset_path) + with open(asset_path, "rb") as f: + file_size = get_file_object_size(f) + + self.assertEqual(file_size, target_size) + + def test_get_file_type_identifies_a_zip_file(self): + with BytesIO() as file_object: + with ZipFile(file_object, "w") as zf: + with BytesIO() as stream: + stream.write(b"This is a zip file") + zf.writestr("dummy_file", stream.getbuffer()) + file_object.seek(0) + file_type = get_file_type(file_object) + + self.assertEqual(file_type, "zip") + + def test_get_file_type_identifies_tdsx_as_zip_file(self): + with open(asset("World Indicators.tdsx"), "rb") as file_object: + file_type = get_file_type(file_object) + self.assertEqual(file_type, "zip") + + def test_get_file_type_identifies_twbx_as_zip_file(self): + with open(asset("SampleWB.twbx"), "rb") as file_object: + file_type = get_file_type(file_object) + self.assertEqual(file_type, "zip") + + def test_get_file_type_identifies_xml_file(self): + root = ET.Element("root") + child = ET.SubElement(root, "child") + child.text = "This is a child element" + etree = ET.ElementTree(root) + + with BytesIO() as file_object: + etree.write(file_object, encoding="utf-8", xml_declaration=True) + + file_object.seek(0) + file_type = get_file_type(file_object) + + self.assertEqual(file_type, "xml") + + def test_get_file_type_identifies_tds_as_xml_file(self): + with open(asset("World Indicators.tds"), "rb") as file_object: + file_type = get_file_type(file_object) + self.assertEqual(file_type, "xml") + + def test_get_file_type_identifies_twb_as_xml_file(self): + with open(asset("RESTAPISample.twb"), "rb") as file_object: + file_type = get_file_type(file_object) + self.assertEqual(file_type, "xml") + + def test_get_file_type_identifies_hyper_file(self): + with open(asset("World Indicators.hyper"), "rb") as file_object: + file_type = get_file_type(file_object) + self.assertEqual(file_type, "hyper") + + def test_get_file_type_identifies_tde_file(self): + asset_path = os.path.join(TEST_ASSET_DIR, "Data", "Tableau Samples", "World Indicators.tde") + with open(asset_path, "rb") as file_object: + file_type = get_file_type(file_object) + self.assertEqual(file_type, "tde") + + def test_get_file_type_handles_unknown_file_type(self): + # Create a dummy png file + with BytesIO() as file_object: + png_signature = bytes.fromhex("89504E470D0A1A0A") + file_object.write(png_signature) + file_object.seek(0) + + self.assertRaises(ValueError, get_file_type, file_object) diff --git a/test/test_fileuploads.py b/test/test_fileuploads.py new file mode 100644 index 000000000..9567bc3ad --- /dev/null +++ b/test/test_fileuploads.py @@ -0,0 +1,89 @@ +import contextlib +import io +import os +import unittest + +import requests_mock + +from tableauserverclient.config import BYTES_PER_MB, config +from tableauserverclient.server import Server +from ._utils import asset + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") +FILEUPLOAD_INITIALIZE = os.path.join(TEST_ASSET_DIR, "fileupload_initialize.xml") +FILEUPLOAD_APPEND = os.path.join(TEST_ASSET_DIR, "fileupload_append.xml") + + +@contextlib.contextmanager +def set_env(**environ): + old_environ = dict(os.environ) + os.environ.update(environ) + try: + yield + finally: + os.environ.clear() + os.environ.update(old_environ) + + +class FileuploadsTests(unittest.TestCase): + def setUp(self): + self.server = Server("http://test", False) + + # Fake sign in + self.server._site_id 
= "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = f"{self.server.baseurl}/sites/{self.server.site_id}/fileUploads" + + def test_read_chunks_file_path(self): + file_path = asset("SampleWB.twbx") + chunks = self.server.fileuploads._read_chunks(file_path) + for chunk in chunks: + self.assertIsNotNone(chunk) + + def test_read_chunks_file_object(self): + with open(asset("SampleWB.twbx"), "rb") as f: + chunks = self.server.fileuploads._read_chunks(f) + for chunk in chunks: + self.assertIsNotNone(chunk) + + def test_upload_chunks_file_path(self): + file_path = asset("SampleWB.twbx") + upload_id = "7720:170fe6b1c1c7422dadff20f944d58a52-1:0" + + with open(FILEUPLOAD_INITIALIZE, "rb") as f: + initialize_response_xml = f.read().decode("utf-8") + with open(FILEUPLOAD_APPEND, "rb") as f: + append_response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=initialize_response_xml) + m.put(f"{self.baseurl}/{upload_id}", text=append_response_xml) + actual = self.server.fileuploads.upload(file_path) + + self.assertEqual(upload_id, actual) + + def test_upload_chunks_file_object(self): + upload_id = "7720:170fe6b1c1c7422dadff20f944d58a52-1:0" + + with open(asset("SampleWB.twbx"), "rb") as file_content: + with open(FILEUPLOAD_INITIALIZE, "rb") as f: + initialize_response_xml = f.read().decode("utf-8") + with open(FILEUPLOAD_APPEND, "rb") as f: + append_response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=initialize_response_xml) + m.put(f"{self.baseurl}/{upload_id}", text=append_response_xml) + actual = self.server.fileuploads.upload(file_content) + + self.assertEqual(upload_id, actual) + + def test_upload_chunks_config(self): + data = io.BytesIO() + data.write(b"1" * (config.CHUNK_SIZE_MB * BYTES_PER_MB + 1)) + data.seek(0) + with set_env(TSC_CHUNK_SIZE_MB="1"): + chunker = self.server.fileuploads._read_chunks(data) + chunk = next(chunker) + assert len(chunk) == config.CHUNK_SIZE_MB * BYTES_PER_MB + data.seek(0) + assert len(chunk) < len(data.read()) diff --git a/test/test_filter.py b/test/test_filter.py new file mode 100644 index 000000000..e2121307f --- /dev/null +++ b/test/test_filter.py @@ -0,0 +1,22 @@ +import os +import unittest + +import tableauserverclient as TSC + + +class FilterTests(unittest.TestCase): + def setUp(self): + pass + + def test_filter_equal(self): + filter = TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.Equals, "Superstore") + + self.assertEqual(str(filter), "name:eq:Superstore") + + def test_filter_in(self): + # create a IN filter condition with project names that + # contain spaces and "special" characters + projects_to_find = ["default", "Salesforce Sales Projeśt"] + filter = TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.In, projects_to_find) + + self.assertEqual(str(filter), "name:in:[default,Salesforce Sales Projeśt]") diff --git a/test/test_flow.py b/test/test_flow.py new file mode 100644 index 000000000..d458bc77b --- /dev/null +++ b/test/test_flow.py @@ -0,0 +1,225 @@ +import os +import requests_mock +import tempfile +import unittest + +from io import BytesIO + +import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import format_datetime +from ._utils import read_xml_asset, asset + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +GET_XML = os.path.join(TEST_ASSET_DIR, "flow_get.xml") +POPULATE_CONNECTIONS_XML = 
os.path.join(TEST_ASSET_DIR, "flow_populate_connections.xml") +POPULATE_PERMISSIONS_XML = os.path.join(TEST_ASSET_DIR, "flow_populate_permissions.xml") +PUBLISH_XML = os.path.join(TEST_ASSET_DIR, "flow_publish.xml") +UPDATE_XML = os.path.join(TEST_ASSET_DIR, "flow_update.xml") +REFRESH_XML = os.path.join(TEST_ASSET_DIR, "flow_refresh.xml") + + +class FlowTests(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.5" + + self.baseurl = self.server.flows.baseurl + + def test_download(self) -> None: + with requests_mock.mock() as m: + m.get( + self.baseurl + "/587daa37-b84d-4400-a9a2-aa90e0be7837/content", + headers={"Content-Disposition": 'name="tableau_flow"; filename="FlowOne.tfl"'}, + ) + file_path = self.server.flows.download("587daa37-b84d-4400-a9a2-aa90e0be7837") + self.assertTrue(os.path.exists(file_path)) + os.remove(file_path) + + def test_download_object(self) -> None: + with BytesIO() as file_object: + with requests_mock.mock() as m: + m.get( + self.baseurl + "/587daa37-b84d-4400-a9a2-aa90e0be7837/content", + headers={"Content-Disposition": 'name="tableau_flow"; filename="FlowOne.tfl"'}, + ) + file_path = self.server.flows.download("587daa37-b84d-4400-a9a2-aa90e0be7837", filepath=file_object) + self.assertTrue(isinstance(file_path, BytesIO)) + + def test_get(self) -> None: + response_xml = read_xml_asset(GET_XML) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_flows, pagination_item = self.server.flows.get() + + self.assertEqual(5, pagination_item.total_available) + self.assertEqual("587daa37-b84d-4400-a9a2-aa90e0be7837", all_flows[0].id) + self.assertEqual("http://tableauserver/#/flows/1", all_flows[0].webpage_url) + self.assertEqual("2019-06-16T21:43:28Z", format_datetime(all_flows[0].created_at)) + self.assertEqual("2019-06-16T21:43:28Z", format_datetime(all_flows[0].updated_at)) + self.assertEqual("Default", all_flows[0].project_name) + self.assertEqual("FlowOne", all_flows[0].name) + self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", all_flows[0].project_id) + self.assertEqual("7ebb3f20-0fd2-4f27-a2f6-c539470999e2", all_flows[0].owner_id) + self.assertEqual({"i_love_tags"}, all_flows[0].tags) + self.assertEqual("Descriptive", all_flows[0].description) + + self.assertEqual("5c36be69-eb30-461b-b66e-3e2a8e27cc35", all_flows[1].id) + self.assertEqual("http://tableauserver/#/flows/4", all_flows[1].webpage_url) + self.assertEqual("2019-06-18T03:08:19Z", format_datetime(all_flows[1].created_at)) + self.assertEqual("2019-06-18T03:08:19Z", format_datetime(all_flows[1].updated_at)) + self.assertEqual("Default", all_flows[1].project_name) + self.assertEqual("FlowTwo", all_flows[1].name) + self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", all_flows[1].project_id) + self.assertEqual("9127d03f-d996-405f-b392-631b25183a0f", all_flows[1].owner_id) + + def test_update(self) -> None: + response_xml = read_xml_asset(UPDATE_XML) + with requests_mock.mock() as m: + m.put(self.baseurl + "/587daa37-b84d-4400-a9a2-aa90e0be7837", text=response_xml) + single_datasource = TSC.FlowItem("test", "aa23f4ac-906f-11e9-86fb-3f0f71412e77") + single_datasource.owner_id = "7ebb3f20-0fd2-4f27-a2f6-c539470999e2" + single_datasource._id = "587daa37-b84d-4400-a9a2-aa90e0be7837" + single_datasource.description = "So fun to see" + single_datasource = 
self.server.flows.update(single_datasource) + + self.assertEqual("587daa37-b84d-4400-a9a2-aa90e0be7837", single_datasource.id) + self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", single_datasource.project_id) + self.assertEqual("7ebb3f20-0fd2-4f27-a2f6-c539470999e2", single_datasource.owner_id) + self.assertEqual("So fun to see", single_datasource.description) + + def test_populate_connections(self) -> None: + response_xml = read_xml_asset(POPULATE_CONNECTIONS_XML) + with requests_mock.mock() as m: + m.get(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections", text=response_xml) + single_datasource = TSC.FlowItem("test", "aa23f4ac-906f-11e9-86fb-3f0f71412e77") + single_datasource.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + self.server.flows.populate_connections(single_datasource) + self.assertEqual("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", single_datasource.id) + connections = single_datasource.connections + + self.assertTrue(connections) + conn1, conn2, conn3 = connections + self.assertEqual("405c1e4b-60c9-499f-9c47-a4ef1af69359", conn1.id) + self.assertEqual("excel-direct", conn1.connection_type) + self.assertEqual("", conn1.server_address) + self.assertEqual("", conn1.username) + self.assertEqual(False, conn1.embed_password) + self.assertEqual("b47f41b1-2c47-41a3-8b17-a38ebe8b340c", conn2.id) + self.assertEqual("sqlserver", conn2.connection_type) + self.assertEqual("test.database.com", conn2.server_address) + self.assertEqual("bob", conn2.username) + self.assertEqual(False, conn2.embed_password) + self.assertEqual("4f4a3b78-0554-43a7-b327-9605e9df9dd2", conn3.id) + self.assertEqual("tableau-server-site", conn3.connection_type) + self.assertEqual("http://tableauserver", conn3.server_address) + self.assertEqual("sally", conn3.username) + self.assertEqual(True, conn3.embed_password) + + def test_populate_permissions(self) -> None: + with open(asset(POPULATE_PERMISSIONS_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/permissions", text=response_xml) + single_datasource = TSC.FlowItem("test") + single_datasource._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5" + + self.server.flows.populate_permissions(single_datasource) + permissions = single_datasource.permissions + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "aa42f384-906f-11e9-86fc-bb24278874b9") + self.assertDictEqual( + permissions[0].capabilities, + { + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + }, + ) + + self.assertEqual(permissions[1].grantee.tag_name, "groupSet") + self.assertEqual(permissions[1].grantee.id, "7ea95a1b-6872-44d6-a969-68598a7df4a0") + self.assertDictEqual( + permissions[1].capabilities, + { + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + }, + ) + + def test_publish(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_flow = TSC.FlowItem(name="SampleFlow", project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + + sample_flow = os.path.join(TEST_ASSET_DIR, "SampleFlow.tfl") + publish_mode = self.server.PublishMode.CreateNew + + new_flow = self.server.flows.publish(new_flow, sample_flow, 
publish_mode) + + self.assertEqual("2457c468-1b24-461a-8f95-a461b3209d32", new_flow.id) + self.assertEqual("SampleFlow", new_flow.name) + self.assertEqual("2023-01-13T09:50:55Z", format_datetime(new_flow.created_at)) + self.assertEqual("2023-01-13T09:50:55Z", format_datetime(new_flow.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_flow.project_id) + self.assertEqual("default", new_flow.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_flow.owner_id) + + def test_publish_file_object(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_flow = TSC.FlowItem(name="SampleFlow", project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + + sample_flow = os.path.join(TEST_ASSET_DIR, "SampleFlow.tfl") + publish_mode = self.server.PublishMode.CreateNew + + with open(sample_flow, "rb") as fp: + publish_mode = self.server.PublishMode.CreateNew + + new_flow = self.server.flows.publish(new_flow, fp, publish_mode) + + self.assertEqual("2457c468-1b24-461a-8f95-a461b3209d32", new_flow.id) + self.assertEqual("SampleFlow", new_flow.name) + self.assertEqual("2023-01-13T09:50:55Z", format_datetime(new_flow.created_at)) + self.assertEqual("2023-01-13T09:50:55Z", format_datetime(new_flow.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_flow.project_id) + self.assertEqual("default", new_flow.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_flow.owner_id) + + def test_refresh(self): + with open(asset(REFRESH_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl + "/92967d2d-c7e2-46d0-8847-4802df58f484/run", text=response_xml) + flow_item = TSC.FlowItem("test") + flow_item._id = "92967d2d-c7e2-46d0-8847-4802df58f484" + refresh_job = self.server.flows.refresh(flow_item) + + self.assertEqual(refresh_job.id, "d1b2ccd0-6dfa-444a-aee4-723dbd6b7c9d") + self.assertEqual(refresh_job.mode, "Asynchronous") + self.assertEqual(refresh_job.type, "RunFlow") + self.assertEqual(format_datetime(refresh_job.created_at), "2018-05-22T13:00:29Z") + self.assertIsInstance(refresh_job.flow_run, TSC.FlowRunItem) + self.assertEqual(refresh_job.flow_run.id, "e0c3067f-2333-4eee-8028-e0a56ca496f6") + self.assertEqual(refresh_job.flow_run.flow_id, "92967d2d-c7e2-46d0-8847-4802df58f484") + self.assertEqual(format_datetime(refresh_job.flow_run.started_at), "2018-05-22T13:00:29Z") + + def test_bad_download_response(self) -> None: + with requests_mock.mock() as m, tempfile.TemporaryDirectory() as td: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content", + headers={"Content-Disposition": '''name="tableau_flow"; filename*=UTF-8''"Sample flow.tfl"'''}, + ) + file_path = self.server.flows.download("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", td) + self.assertTrue(os.path.exists(file_path)) diff --git a/test/test_flowruns.py b/test/test_flowruns.py new file mode 100644 index 000000000..8af2540dc --- /dev/null +++ b/test/test_flowruns.py @@ -0,0 +1,111 @@ +import sys +import unittest + +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import format_datetime +from tableauserverclient.server.endpoint.exceptions import FlowRunFailedException +from ._utils import read_xml_asset, mocked_time, server_response_error_factory + +GET_XML = "flow_runs_get.xml" +GET_BY_ID_XML = "flow_runs_get_by_id.xml" 
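# An illustrative sketch of publishing and then refreshing a flow with the calls exercised
# in the tests above; the server URL, credentials, site, project LUID, and .tfl path are
# all placeholders, and the TableauAuth sign-in is assumed from the library's usual auth flow.
import tableauserverclient as TSC

server = TSC.Server("https://tableau.example.com", use_server_version=True)
with server.auth.sign_in(TSC.TableauAuth("user", "password", site_id="mysite")):
    flow = TSC.FlowItem(name="SampleFlow", project_id="<project-luid>")
    flow = server.flows.publish(flow, "SampleFlow.tfl", server.PublishMode.CreateNew)
    # refresh() queues a RunFlow job; the returned JobItem carries the new FlowRunItem
    job = server.flows.refresh(flow)
    print(job.id, job.flow_run.id)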
+GET_BY_ID_FAILED_XML = "flow_runs_get_by_id_failed.xml" +GET_BY_ID_INPROGRESS_XML = "flow_runs_get_by_id_inprogress.xml" + + +class FlowRunTests(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.10" + + self.baseurl = self.server.flow_runs.baseurl + + def test_get(self) -> None: + response_xml = read_xml_asset(GET_XML) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_flow_runs = self.server.flow_runs.get() + + self.assertEqual("cc2e652d-4a9b-4476-8c93-b238c45db968", all_flow_runs[0].id) + self.assertEqual("2021-02-11T01:42:55Z", format_datetime(all_flow_runs[0].started_at)) + self.assertEqual("2021-02-11T01:57:38Z", format_datetime(all_flow_runs[0].completed_at)) + self.assertEqual("Success", all_flow_runs[0].status) + self.assertEqual("100", all_flow_runs[0].progress) + self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", all_flow_runs[0].background_job_id) + + self.assertEqual("a3104526-c0c6-4ea5-8362-e03fc7cbd7ee", all_flow_runs[1].id) + self.assertEqual("2021-02-13T04:05:30Z", format_datetime(all_flow_runs[1].started_at)) + self.assertEqual("2021-02-13T04:05:35Z", format_datetime(all_flow_runs[1].completed_at)) + self.assertEqual("Failed", all_flow_runs[1].status) + self.assertEqual("100", all_flow_runs[1].progress) + self.assertEqual("1ad21a9d-2530-4fbf-9064-efd3c736e023", all_flow_runs[1].background_job_id) + + def test_get_by_id(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_XML) + with requests_mock.mock() as m: + m.get(self.baseurl + "/cc2e652d-4a9b-4476-8c93-b238c45db968", text=response_xml) + flow_run = self.server.flow_runs.get_by_id("cc2e652d-4a9b-4476-8c93-b238c45db968") + + self.assertEqual("cc2e652d-4a9b-4476-8c93-b238c45db968", flow_run.id) + self.assertEqual("2021-02-11T01:42:55Z", format_datetime(flow_run.started_at)) + self.assertEqual("2021-02-11T01:57:38Z", format_datetime(flow_run.completed_at)) + self.assertEqual("Success", flow_run.status) + self.assertEqual("100", flow_run.progress) + self.assertEqual("1ad21a9d-2530-4fbf-9064-efd3c736e023", flow_run.background_job_id) + + def test_cancel_id(self) -> None: + with requests_mock.mock() as m: + m.put(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", status_code=204) + self.server.flow_runs.cancel("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + + def test_cancel_item(self) -> None: + run = TSC.FlowRunItem() + run._id = "ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + with requests_mock.mock() as m: + m.put(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", status_code=204) + self.server.flow_runs.cancel(run) + + def test_wait_for_job_finished(self) -> None: + # Waiting for an already finished job, directly returns that job's info + response_xml = read_xml_asset(GET_BY_ID_XML) + flow_run_id = "cc2e652d-4a9b-4476-8c93-b238c45db968" + with mocked_time(), requests_mock.mock() as m: + m.get(f"{self.baseurl}/{flow_run_id}", text=response_xml) + flow_run = self.server.flow_runs.wait_for_job(flow_run_id) + + self.assertEqual(flow_run_id, flow_run.id) + self.assertEqual(flow_run.progress, "100") + + def test_wait_for_job_failed(self) -> None: + # Waiting for a failed job raises an exception + response_xml = read_xml_asset(GET_BY_ID_FAILED_XML) + flow_run_id = "c2b35d5a-e130-471a-aec8-7bc5435fe0e7" + with mocked_time(), requests_mock.mock() as m: + 
m.get(f"{self.baseurl}/{flow_run_id}", text=response_xml) + with self.assertRaises(FlowRunFailedException): + self.server.flow_runs.wait_for_job(flow_run_id) + + def test_wait_for_job_timeout(self) -> None: + # Waiting for a job which doesn't terminate will throw an exception + response_xml = read_xml_asset(GET_BY_ID_INPROGRESS_XML) + flow_run_id = "71afc22c-9c06-40be-8d0f-4c4166d29e6c" + with mocked_time(), requests_mock.mock() as m: + m.get(f"{self.baseurl}/{flow_run_id}", text=response_xml) + with self.assertRaises(TimeoutError): + self.server.flow_runs.wait_for_job(flow_run_id, timeout=30) + + def test_queryset(self) -> None: + response_xml = read_xml_asset(GET_XML) + error_response = server_response_error_factory( + "400006", "Bad Request", "0xB4EAB088 : The start index '9900' is greater than or equal to the total count.)" + ) + with requests_mock.mock() as m: + m.get(f"{self.baseurl}?pageNumber=1", text=response_xml) + m.get(f"{self.baseurl}?pageNumber=2", text=error_response) + queryset = self.server.flow_runs.all() + assert len(queryset) == sys.maxsize diff --git a/test/test_flowtask.py b/test/test_flowtask.py new file mode 100644 index 000000000..2d9f7c7bd --- /dev/null +++ b/test/test_flowtask.py @@ -0,0 +1,47 @@ +import os +import unittest +from datetime import time +from pathlib import Path + +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import parse_datetime +from tableauserverclient.models.task_item import TaskItem + +TEST_ASSET_DIR = Path(__file__).parent / "assets" +GET_XML_CREATE_FLOW_TASK_RESPONSE = os.path.join(TEST_ASSET_DIR, "tasks_create_flow_task.xml") + + +class TaskTests(unittest.TestCase): + def setUp(self): + self.server = TSC.Server("http://test", False) + self.server.version = "3.22" + + # Fake Signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.flow_tasks.baseurl + + def test_create_flow_task(self): + monthly_interval = TSC.MonthlyInterval(start_time=time(23, 30), interval_value=15) + monthly_schedule = TSC.ScheduleItem( + "Monthly Schedule", + 50, + TSC.ScheduleItem.Type.Flow, + TSC.ScheduleItem.ExecutionOrder.Parallel, + monthly_interval, + ) + target_item = TSC.Target("flow_id", "flow") + + task = TaskItem(None, "RunFlow", None, schedule_item=monthly_schedule, target=target_item) + + with open(GET_XML_CREATE_FLOW_TASK_RESPONSE, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(f"{self.baseurl}", text=response_xml) + create_response_content = self.server.flow_tasks.create(task).decode("utf-8") + + self.assertTrue("schedule_id" in create_response_content) + self.assertTrue("flow_id" in create_response_content) diff --git a/test/test_group.py b/test/test_group.py index 7096ca408..41b5992be 100644 --- a/test/test_group.py +++ b/test/test_group.py @@ -1,199 +1,312 @@ -# encoding=utf-8 +from pathlib import Path import unittest import os import requests_mock import tableauserverclient as TSC from tableauserverclient.datetime_helpers import format_datetime -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = Path(__file__).absolute().parent / "assets" -GET_XML = os.path.join(TEST_ASSET_DIR, 'group_get.xml') -POPULATE_USERS = os.path.join(TEST_ASSET_DIR, 'group_populate_users.xml') -POPULATE_USERS_EMPTY = os.path.join(TEST_ASSET_DIR, 'group_populate_users_empty.xml') -ADD_USER = os.path.join(TEST_ASSET_DIR, 
'group_add_user.xml') -ADD_USER_POPULATE = os.path.join(TEST_ASSET_DIR, 'group_users_added.xml') -CREATE_GROUP = os.path.join(TEST_ASSET_DIR, 'group_create.xml') -CREATE_GROUP_ASYNC = os.path.join(TEST_ASSET_DIR, 'group_create_async.xml') -UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'group_update.xml') +# TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +GET_XML = os.path.join(TEST_ASSET_DIR, "group_get.xml") +POPULATE_USERS = os.path.join(TEST_ASSET_DIR, "group_populate_users.xml") +POPULATE_USERS_EMPTY = os.path.join(TEST_ASSET_DIR, "group_populate_users_empty.xml") +ADD_USER = os.path.join(TEST_ASSET_DIR, "group_add_user.xml") +ADD_USERS = TEST_ASSET_DIR / "group_add_users.xml" +ADD_USER_POPULATE = os.path.join(TEST_ASSET_DIR, "group_users_added.xml") +CREATE_GROUP = os.path.join(TEST_ASSET_DIR, "group_create.xml") +CREATE_GROUP_AD = os.path.join(TEST_ASSET_DIR, "group_create_ad.xml") +CREATE_GROUP_ASYNC = os.path.join(TEST_ASSET_DIR, "group_create_async.xml") +UPDATE_XML = os.path.join(TEST_ASSET_DIR, "group_update.xml") +UPDATE_ASYNC_XML = TEST_ASSET_DIR / "group_update_async.xml" class GroupTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) # Fake signin - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.groups.baseurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_groups, pagination_item = self.server.groups.get() self.assertEqual(3, pagination_item.total_available) - self.assertEqual('ef8b19c0-43b6-11e6-af50-63f5805dbe3c', all_groups[0].id) - self.assertEqual('All Users', all_groups[0].name) - self.assertEqual('local', all_groups[0].domain_name) + self.assertEqual("ef8b19c0-43b6-11e6-af50-63f5805dbe3c", all_groups[0].id) + self.assertEqual("All Users", all_groups[0].name) + self.assertEqual("local", all_groups[0].domain_name) - self.assertEqual('e7833b48-c6f7-47b5-a2a7-36e7dd232758', all_groups[1].id) - self.assertEqual('Another group', all_groups[1].name) - self.assertEqual('local', all_groups[1].domain_name) + self.assertEqual("e7833b48-c6f7-47b5-a2a7-36e7dd232758", all_groups[1].id) + self.assertEqual("Another group", all_groups[1].name) + self.assertEqual("local", all_groups[1].domain_name) - self.assertEqual('86a66d40-f289-472a-83d0-927b0f954dc8', all_groups[2].id) - self.assertEqual('TableauExample', all_groups[2].name) - self.assertEqual('local', all_groups[2].domain_name) + self.assertEqual("86a66d40-f289-472a-83d0-927b0f954dc8", all_groups[2].id) + self.assertEqual("TableauExample", all_groups[2].name) + self.assertEqual("local", all_groups[2].domain_name) - def test_get_before_signin(self): + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.groups.get) - def test_populate_users(self): - with open(POPULATE_USERS, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_populate_users(self) -> None: + with open(POPULATE_USERS, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - 
m.get(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users?pageNumber=1&pageSize=100', - text=response_xml, complete_qs=True) - single_group = TSC.GroupItem(name='Test Group') - single_group._id = 'e7833b48-c6f7-47b5-a2a7-36e7dd232758' + m.get( + self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users?pageNumber=1&pageSize=100", + text=response_xml, + complete_qs=True, + ) + single_group = TSC.GroupItem(name="Test Group") + single_group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" self.server.groups.populate_users(single_group) self.assertEqual(1, len(list(single_group.users))) user = list(single_group.users).pop() - self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', user.id) - self.assertEqual('alice', user.name) - self.assertEqual('Publisher', user.site_role) - self.assertEqual('2016-08-16T23:17:06Z', format_datetime(user.last_login)) + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", user.id) + self.assertEqual("alice", user.name) + self.assertEqual("Publisher", user.site_role) + self.assertEqual("2016-08-16T23:17:06Z", format_datetime(user.last_login)) - def test_delete(self): + def test_delete(self) -> None: with requests_mock.mock() as m: - m.delete(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758', status_code=204) - self.server.groups.delete('e7833b48-c6f7-47b5-a2a7-36e7dd232758') + m.delete(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758", status_code=204) + self.server.groups.delete("e7833b48-c6f7-47b5-a2a7-36e7dd232758") - def test_remove_user(self): - with open(POPULATE_USERS, 'rb') as f: - response_xml_populate = f.read().decode('utf-8') + def test_remove_user(self) -> None: + with open(POPULATE_USERS, "rb") as f: + response_xml_populate = f.read().decode("utf-8") - with open(POPULATE_USERS_EMPTY, 'rb') as f: - response_xml_empty = f.read().decode('utf-8') + with open(POPULATE_USERS_EMPTY, "rb") as f: + response_xml_empty = f.read().decode("utf-8") with requests_mock.mock() as m: - url = self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users' \ - '/dd2239f6-ddf1-4107-981a-4cf94e415794' + url = self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users" "/dd2239f6-ddf1-4107-981a-4cf94e415794" m.delete(url, status_code=204) # We register the get endpoint twice. The first time we have 1 user, the second we have 'removed' them. 
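# An illustrative sketch of the group-membership calls covered here, assuming `server` is
# an already signed-in TSC.Server; the user id below is a placeholder LUID.
all_groups, pagination = server.groups.get()
group = all_groups[0]
server.groups.populate_users(group)  # lazily fetches the group's users from the users endpoint
for user in group.users:
    print(user.name, user.site_role)
server.groups.add_user(group, "user-luid")     # add a user by id
server.groups.remove_user(group, "user-luid")  # remove the same user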
- m.get(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users', text=response_xml_populate) + m.get(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users", text=response_xml_populate) - single_group = TSC.GroupItem('test') - single_group._id = 'e7833b48-c6f7-47b5-a2a7-36e7dd232758' + single_group = TSC.GroupItem("test") + single_group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" self.server.groups.populate_users(single_group) self.assertEqual(1, len(list(single_group.users))) - self.server.groups.remove_user(single_group, 'dd2239f6-ddf1-4107-981a-4cf94e415794') + self.server.groups.remove_user(single_group, "dd2239f6-ddf1-4107-981a-4cf94e415794") - m.get(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users', text=response_xml_empty) + m.get(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users", text=response_xml_empty) self.assertEqual(0, len(list(single_group.users))) - def test_add_user(self): - with open(ADD_USER, 'rb') as f: - response_xml_add = f.read().decode('utf-8') - with open(ADD_USER_POPULATE, 'rb') as f: - response_xml_populate = f.read().decode('utf-8') + def test_add_user(self) -> None: + with open(ADD_USER, "rb") as f: + response_xml_add = f.read().decode("utf-8") + with open(ADD_USER_POPULATE, "rb") as f: + response_xml_populate = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users', text=response_xml_add) - m.get(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users', text=response_xml_populate) - single_group = TSC.GroupItem('test') - single_group._id = 'e7833b48-c6f7-47b5-a2a7-36e7dd232758' + m.post(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users", text=response_xml_add) + m.get(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users", text=response_xml_populate) + single_group = TSC.GroupItem("test") + single_group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" - self.server.groups.add_user(single_group, '5de011f8-5aa9-4d5b-b991-f462c8dd6bb7') + self.server.groups.add_user(single_group, "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7") self.server.groups.populate_users(single_group) self.assertEqual(1, len(list(single_group.users))) user = list(single_group.users).pop() - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', user.id) - self.assertEqual('testuser', user.name) - self.assertEqual('ServerAdministrator', user.site_role) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", user.id) + self.assertEqual("testuser", user.name) + self.assertEqual("ServerAdministrator", user.site_role) + + def test_add_users(self) -> None: + self.server.version = "3.21" + self.baseurl = self.server.groups.baseurl + + def make_user(id: str, name: str, siteRole: str) -> TSC.UserItem: + user = TSC.UserItem(name, siteRole) + user._id = id + return user + + users = [ + make_user(id="5de011f8-4aa9-4d5b-b991-f464c8dd6bb7", name="Alice", siteRole="ServerAdministrator"), + make_user(id="5de011f8-3aa9-4d5b-b991-f467c8dd6bb8", name="Bob", siteRole="Explorer"), + make_user(id="5de011f8-2aa9-4d5b-b991-f466c8dd6bb8", name="Charlie", siteRole="Viewer"), + ] + group = TSC.GroupItem("test") + group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" + + with requests_mock.mock() as m: + m.post(f"{self.baseurl}/{group.id}/users", text=ADD_USERS.read_text()) + resp_users = self.server.groups.add_users(group, users) + + for user, resp_user in zip(users, resp_users): + with self.subTest(user=user, resp_user=resp_user): + assert user.id == resp_user.id + assert user.name == resp_user.name + 
assert user.site_role == resp_user.site_role + + def test_remove_users(self) -> None: + self.server.version = "3.21" + self.baseurl = self.server.groups.baseurl + + def make_user(id: str, name: str, siteRole: str) -> TSC.UserItem: + user = TSC.UserItem(name, siteRole) + user._id = id + return user + + users = [ + make_user(id="5de011f8-4aa9-4d5b-b991-f464c8dd6bb7", name="Alice", siteRole="ServerAdministrator"), + make_user(id="5de011f8-3aa9-4d5b-b991-f467c8dd6bb8", name="Bob", siteRole="Explorer"), + make_user(id="5de011f8-2aa9-4d5b-b991-f466c8dd6bb8", name="Charlie", siteRole="Viewer"), + ] + group = TSC.GroupItem("test") + group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" - def test_add_user_before_populating(self): - with open(GET_XML, 'rb') as f: - get_xml_response = f.read().decode('utf-8') - with open(ADD_USER, 'rb') as f: - add_user_response = f.read().decode('utf-8') + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{group.id}/users/remove") + self.server.groups.remove_users(group, users) + + def test_add_user_before_populating(self) -> None: + with open(GET_XML, "rb") as f: + get_xml_response = f.read().decode("utf-8") + with open(ADD_USER, "rb") as f: + add_user_response = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=get_xml_response) - m.post('http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/groups/ef8b19c0-43b6-11e6-af50' - '-63f5805dbe3c/users', text=add_user_response) + m.post( + self.baseurl + "/ef8b19c0-43b6-11e6-af50-63f5805dbe3c/users", + text=add_user_response, + ) all_groups, pagination_item = self.server.groups.get() single_group = all_groups[0] - self.server.groups.add_user(single_group, '5de011f8-5aa9-4d5b-b991-f462c8dd6bb7') + self.server.groups.add_user(single_group, "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7") - def test_add_user_missing_user_id(self): - with open(POPULATE_USERS, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_add_user_missing_user_id(self) -> None: + with open(POPULATE_USERS, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users', text=response_xml) - single_group = TSC.GroupItem(name='Test Group') - single_group._id = 'e7833b48-c6f7-47b5-a2a7-36e7dd232758' + m.get(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users", text=response_xml) + single_group = TSC.GroupItem(name="Test Group") + single_group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" self.server.groups.populate_users(single_group) - self.assertRaises(ValueError, self.server.groups.add_user, single_group, '') + self.assertRaises(ValueError, self.server.groups.add_user, single_group, "") - def test_add_user_missing_group_id(self): - single_group = TSC.GroupItem('test') - single_group._users = [] - self.assertRaises(TSC.MissingRequiredFieldError, self.server.groups.add_user, single_group, - '5de011f8-5aa9-4d5b-b991-f462c8dd6bb7') + def test_add_user_missing_group_id(self) -> None: + single_group = TSC.GroupItem("test") + self.assertRaises( + TSC.MissingRequiredFieldError, + self.server.groups.add_user, + single_group, + "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", + ) - def test_remove_user_before_populating(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_remove_user_before_populating(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) - 
m.delete('http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/groups/ef8b19c0-43b6-11e6-af50' - '-63f5805dbe3c/users/5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', - text='ok') + m.delete( + self.baseurl + "/ef8b19c0-43b6-11e6-af50-63f5805dbe3c/users/5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", + text="ok", + ) all_groups, pagination_item = self.server.groups.get() single_group = all_groups[0] - self.server.groups.remove_user(single_group, '5de011f8-5aa9-4d5b-b991-f462c8dd6bb7') + self.server.groups.remove_user(single_group, "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7") - def test_remove_user_missing_user_id(self): - with open(POPULATE_USERS, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_remove_user_missing_user_id(self) -> None: + with open(POPULATE_USERS, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users', text=response_xml) - single_group = TSC.GroupItem(name='Test Group') - single_group._id = 'e7833b48-c6f7-47b5-a2a7-36e7dd232758' + m.get(self.baseurl + "/e7833b48-c6f7-47b5-a2a7-36e7dd232758/users", text=response_xml) + single_group = TSC.GroupItem(name="Test Group") + single_group._id = "e7833b48-c6f7-47b5-a2a7-36e7dd232758" self.server.groups.populate_users(single_group) - self.assertRaises(ValueError, self.server.groups.remove_user, single_group, '') + self.assertRaises(ValueError, self.server.groups.remove_user, single_group, "") - def test_remove_user_missing_group_id(self): - single_group = TSC.GroupItem('test') - single_group._users = [] - self.assertRaises(TSC.MissingRequiredFieldError, self.server.groups.remove_user, single_group, - '5de011f8-5aa9-4d5b-b991-f462c8dd6bb7') + def test_remove_user_missing_group_id(self) -> None: + single_group = TSC.GroupItem("test") + self.assertRaises( + TSC.MissingRequiredFieldError, + self.server.groups.remove_user, + single_group, + "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", + ) - def test_create_group(self): - with open(CREATE_GROUP, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_create_group(self) -> None: + with open(CREATE_GROUP, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - group_to_create = TSC.GroupItem(u'試供品') + group_to_create = TSC.GroupItem("試供品") group = self.server.groups.create(group_to_create) - self.assertEqual(group.name, u'試供品') - self.assertEqual(group.id, '3e4a9ea0-a07a-4fe6-b50f-c345c8c81034') + self.assertEqual(group.name, "試供品") + self.assertEqual(group.id, "3e4a9ea0-a07a-4fe6-b50f-c345c8c81034") - def test_update(self): - with open(UPDATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_create_ad_group(self) -> None: + with open(CREATE_GROUP_AD, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + group_to_create = TSC.GroupItem("試供品") + group_to_create.domain_name = "just-has-to-exist" + group = self.server.groups.create_AD_group(group_to_create, False) + self.assertEqual(group.name, "試供品") + self.assertEqual(group.license_mode, "onLogin") + self.assertEqual(group.minimum_site_role, "Creator") + self.assertEqual(group.domain_name, "active-directory-domain-name") + + def test_create_group_async(self) -> None: + with open(CREATE_GROUP_ASYNC, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/ef8b19c0-43b6-11e6-af50-63f5805dbe3c', text=response_xml) - 
group = TSC.GroupItem(name='Test Group') - group._domain_name = 'local' - group._id = 'ef8b19c0-43b6-11e6-af50-63f5805dbe3c' + m.post(self.baseurl, text=response_xml) + group_to_create = TSC.GroupItem("試供品") + group_to_create.domain_name = "woohoo" + job = self.server.groups.create_AD_group(group_to_create, True) + self.assertEqual(job.mode, "Asynchronous") + self.assertEqual(job.type, "GroupImport") + + def test_update(self) -> None: + with open(UPDATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/ef8b19c0-43b6-11e6-af50-63f5805dbe3c", text=response_xml) + group = TSC.GroupItem(name="Test Group") + group._domain_name = "local" + group._id = "ef8b19c0-43b6-11e6-af50-63f5805dbe3c" group = self.server.groups.update(group) - self.assertEqual('ef8b19c0-43b6-11e6-af50-63f5805dbe3c', group.id) - self.assertEqual('Group updated name', group.name) + self.assertEqual("ef8b19c0-43b6-11e6-af50-63f5805dbe3c", group.id) + self.assertEqual("Group updated name", group.name) + self.assertEqual("ExplorerCanPublish", group.minimum_site_role) + self.assertEqual("onLogin", group.license_mode) + + # async update is not supported for local groups + def test_update_local_async(self) -> None: + group = TSC.GroupItem("myGroup") + group._id = "ef8b19c0-43b6-11e6-af50-63f5805dbe3c" + self.assertRaises(ValueError, self.server.groups.update, group, as_job=True) + + # mimic group returned from server where domain name is set to 'local' + group.domain_name = "local" + self.assertRaises(ValueError, self.server.groups.update, group, as_job=True) + + def test_update_ad_async(self) -> None: + group = TSC.GroupItem("myGroup", "example.com") + group._id = "ef8b19c0-43b6-11e6-af50-63f5805dbe3c" + group.minimum_site_role = TSC.UserItem.Roles.Viewer + + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{group.id}?asJob=True", text=UPDATE_ASYNC_XML.read_bytes().decode("utf8")) + job = self.server.groups.update(group, as_job=True) + + self.assertEqual(job.id, "c2566efc-0767-4f15-89cb-56acb4349c1b") + self.assertEqual(job.mode, "Asynchronous") + self.assertEqual(job.type, "GroupSync") diff --git a/test/test_group_model.py b/test/test_group_model.py index eb11adcdd..659a3611f 100644 --- a/test/test_group_model.py +++ b/test/test_group_model.py @@ -1,14 +1,15 @@ import unittest + import tableauserverclient as TSC class GroupModelTests(unittest.TestCase): - def test_invalid_name(self): - self.assertRaises(ValueError, TSC.GroupItem, None) - self.assertRaises(ValueError, TSC.GroupItem, "") + def test_invalid_minimum_site_role(self): group = TSC.GroupItem("grp") with self.assertRaises(ValueError): - group.name = None + group.minimum_site_role = "Captain" + def test_invalid_license_mode(self): + group = TSC.GroupItem("grp") with self.assertRaises(ValueError): - group.name = "" + group.license_mode = "off" diff --git a/test/test_groupsets.py b/test/test_groupsets.py new file mode 100644 index 000000000..5479809d2 --- /dev/null +++ b/test/test_groupsets.py @@ -0,0 +1,139 @@ +from pathlib import Path +import unittest + +from defusedxml.ElementTree import fromstring +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.models.reference_item import ResourceReference + +TEST_ASSET_DIR = Path(__file__).parent / "assets" +GROUPSET_CREATE = TEST_ASSET_DIR / "groupsets_create.xml" +GROUPSETS_GET = TEST_ASSET_DIR / "groupsets_get.xml" +GROUPSET_GET_BY_ID = TEST_ASSET_DIR / "groupsets_get_by_id.xml" +GROUPSET_UPDATE = TEST_ASSET_DIR / 
"groupsets_get_by_id.xml" + + +class TestGroupSets(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + self.server.version = "3.22" + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.group_sets.baseurl + + def test_get(self) -> None: + with requests_mock.mock() as m: + m.get(self.baseurl, text=GROUPSETS_GET.read_text()) + groupsets, pagination_item = self.server.group_sets.get() + + assert len(groupsets) == 3 + assert pagination_item.total_available == 3 + assert groupsets[0].id == "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + assert groupsets[0].name == "All Users" + assert groupsets[0].group_count == 1 + assert groupsets[0].groups[0].name == "group-one" + assert groupsets[0].groups[0].id == "gs-1" + + assert groupsets[1].id == "9a8a7b6b-5c4c-3d2d-1e0e-9a8a7b6b5b4b" + assert groupsets[1].name == "active-directory-group-import" + assert groupsets[1].group_count == 1 + assert groupsets[1].groups[0].name == "group-two" + assert groupsets[1].groups[0].id == "gs21" + + assert groupsets[2].id == "7b6b59a8-ac3c-4d1d-2e9e-0b5b4ba8a7b6" + assert groupsets[2].name == "local-group-license-on-login" + assert groupsets[2].group_count == 1 + assert groupsets[2].groups[0].name == "group-three" + assert groupsets[2].groups[0].id == "gs-3" + + def test_get_by_id(self) -> None: + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d", text=GROUPSET_GET_BY_ID.read_text()) + groupset = self.server.group_sets.get_by_id("1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d") + + assert groupset.id == "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + assert groupset.name == "All Users" + assert groupset.group_count == 3 + assert len(groupset.groups) == 3 + + assert groupset.groups[0].name == "group-one" + assert groupset.groups[0].id == "gs-1" + assert groupset.groups[1].name == "group-two" + assert groupset.groups[1].id == "gs21" + assert groupset.groups[2].name == "group-three" + assert groupset.groups[2].id == "gs-3" + + def test_update(self) -> None: + id_ = "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + groupset = TSC.GroupSetItem("All Users") + groupset.id = id_ + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{id_}", text=GROUPSET_UPDATE.read_text()) + groupset = self.server.group_sets.update(groupset) + + assert groupset.id == id_ + assert groupset.name == "All Users" + assert groupset.group_count == 3 + assert len(groupset.groups) == 3 + + assert groupset.groups[0].name == "group-one" + assert groupset.groups[0].id == "gs-1" + assert groupset.groups[1].name == "group-two" + assert groupset.groups[1].id == "gs21" + assert groupset.groups[2].name == "group-three" + assert groupset.groups[2].id == "gs-3" + + def test_create(self) -> None: + groupset = TSC.GroupSetItem("All Users") + with requests_mock.mock() as m: + m.post(self.baseurl, text=GROUPSET_CREATE.read_text()) + groupset = self.server.group_sets.create(groupset) + + assert groupset.id == "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + assert groupset.name == "All Users" + assert groupset.group_count == 0 + assert len(groupset.groups) == 0 + + def test_add_group(self) -> None: + groupset = TSC.GroupSetItem("All") + groupset.id = "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + group = TSC.GroupItem("Example") + group._id = "ef8b19c0-43b6-11e6-af50-63f5805dbe3c" + + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{groupset.id}/groups/{group._id}") + 
self.server.group_sets.add_group(groupset, group) + + history = m.request_history + + assert len(history) == 1 + assert history[0].method == "PUT" + assert history[0].url == f"{self.baseurl}/{groupset.id}/groups/{group._id}" + + def test_remove_group(self) -> None: + groupset = TSC.GroupSetItem("All") + groupset.id = "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + group = TSC.GroupItem("Example") + group._id = "ef8b19c0-43b6-11e6-af50-63f5805dbe3c" + + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{groupset.id}/groups/{group._id}") + self.server.group_sets.remove_group(groupset, group) + + history = m.request_history + + assert len(history) == 1 + assert history[0].method == "DELETE" + assert history[0].url == f"{self.baseurl}/{groupset.id}/groups/{group._id}" + + def test_as_reference(self) -> None: + groupset = TSC.GroupSetItem() + groupset.id = "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d" + ref = groupset.as_reference(groupset.id) + assert ref.id == groupset.id + assert ref.tag_name == groupset.tag_name + assert isinstance(ref, ResourceReference) diff --git a/test/test_job.py b/test/test_job.py index 5da0f76fa..20b238764 100644 --- a/test/test_job.py +++ b/test/test_job.py @@ -1,29 +1,35 @@ -import unittest import os +import unittest from datetime import datetime + import requests_mock + import tableauserverclient as TSC from tableauserverclient.datetime_helpers import utc +from tableauserverclient.server.endpoint.exceptions import JobFailedException +from ._utils import read_xml_asset, mocked_time -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') - -GET_XML = os.path.join(TEST_ASSET_DIR, 'job_get.xml') +GET_XML = "job_get.xml" +GET_BY_ID_XML = "job_get_by_id.xml" +GET_BY_ID_FAILED_XML = "job_get_by_id_failed.xml" +GET_BY_ID_CANCELLED_XML = "job_get_by_id_cancelled.xml" +GET_BY_ID_INPROGRESS_XML = "job_get_by_id_inprogress.xml" +GET_BY_ID_WORKBOOK = "job_get_by_id_failed_workbook.xml" class JobTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') - self.server.version = '3.1' + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + self.server.version = "3.1" # Fake signin - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.jobs.baseurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get(self) -> None: + response_xml = read_xml_asset(GET_XML) with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_jobs, pagination_item = self.server.jobs.get() @@ -32,20 +38,109 @@ def test_get(self): started_at = datetime(2018, 5, 22, 13, 0, 37, tzinfo=utc) ended_at = datetime(2018, 5, 22, 13, 0, 45, tzinfo=utc) - self.assertEquals(1, pagination_item.total_available) - self.assertEquals('2eef4225-aa0c-41c4-8662-a76d89ed7336', job.id) - self.assertEquals('Success', job.status) - self.assertEquals('50', job.priority) - self.assertEquals('single_subscription_notify', job.type) - self.assertEquals(created_at, job.created_at) - self.assertEquals(started_at, job.started_at) - self.assertEquals(ended_at, job.ended_at) + self.assertEqual(1, pagination_item.total_available) + self.assertEqual("2eef4225-aa0c-41c4-8662-a76d89ed7336", job.id) + self.assertEqual("Success", job.status) + self.assertEqual("50", job.priority) + 
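# An illustrative sketch of managing a group set with the group_sets endpoint exercised in
# the group-set tests above (which pin the API to version 3.22); `server` is assumed to be
# signed in against a new enough server, and the group-set name is a placeholder.
import tableauserverclient as TSC

groupset = server.group_sets.create(TSC.GroupSetItem("Regional Admins"))
groups, _ = server.groups.get()
server.group_sets.add_group(groupset, groups[0])     # PUT .../groupsets/{id}/groups/{group-id}
server.group_sets.remove_group(groupset, groups[0])  # DELETE the same membership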
self.assertEqual("single_subscription_notify", job.type) + self.assertEqual(created_at, job.created_at) + self.assertEqual(started_at, job.started_at) + self.assertEqual(ended_at, job.ended_at) + + def test_get_by_id(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_XML) + job_id = "2eef4225-aa0c-41c4-8662-a76d89ed7336" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + job = self.server.jobs.get_by_id(job_id) + updated_at = datetime(2020, 5, 13, 20, 25, 18, tzinfo=utc) + + self.assertEqual(job_id, job.id) + self.assertEqual(updated_at, job.updated_at) + self.assertListEqual(job.notes, ["Job detail notes"]) - def test_get_before_signin(self): + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.jobs.get) - def test_cancel(self): + def test_cancel_id(self) -> None: + with requests_mock.mock() as m: + m.put(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", status_code=204) + self.server.jobs.cancel("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + + def test_cancel_item(self) -> None: + created_at = datetime(2018, 5, 22, 13, 0, 29, tzinfo=utc) + started_at = datetime(2018, 5, 22, 13, 0, 37, tzinfo=utc) + job = TSC.JobItem("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", "backgroundJob", "0", created_at, started_at, None, 0) with requests_mock.mock() as m: - m.put(self.baseurl + '/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', status_code=204) - self.server.jobs.cancel('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + m.put(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", status_code=204) + self.server.jobs.cancel(job) + + def test_wait_for_job_finished(self) -> None: + # Waiting for an already finished job, directly returns that job's info + response_xml = read_xml_asset(GET_BY_ID_XML) + job_id = "2eef4225-aa0c-41c4-8662-a76d89ed7336" + with mocked_time(), requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + job = self.server.jobs.wait_for_job(job_id) + + self.assertEqual(job_id, job.id) + self.assertListEqual(job.notes, ["Job detail notes"]) + + def test_wait_for_job_failed(self) -> None: + # Waiting for a failed job raises an exception + response_xml = read_xml_asset(GET_BY_ID_FAILED_XML) + job_id = "77d5e57a-2517-479f-9a3c-a32025f2b64d" + with mocked_time(), requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + with self.assertRaises(JobFailedException): + self.server.jobs.wait_for_job(job_id) + + def test_wait_for_job_timeout(self) -> None: + # Waiting for a job which doesn't terminate will throw an exception + response_xml = read_xml_asset(GET_BY_ID_INPROGRESS_XML) + job_id = "77d5e57a-2517-479f-9a3c-a32025f2b64d" + with mocked_time(), requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + with self.assertRaises(TimeoutError): + self.server.jobs.wait_for_job(job_id, timeout=30) + + def test_get_job_datasource_id(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_FAILED_XML) + job_id = "777bf7c4-421d-4b2c-a518-11b90187c545" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + job = self.server.jobs.get_by_id(job_id) + self.assertEqual(job.datasource_id, "03b9fbec-81f6-4160-ae49-5f9f6d412758") + + def test_get_job_workbook_id(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_WORKBOOK) + job_id = "bb1aab79-db54-4e96-9dd3-461d8f081d08" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + job = 
self.server.jobs.get_by_id(job_id) + self.assertEqual(job.workbook_id, "5998aaaf-1abe-4d38-b4d9-bc53e85bdd13") + + def test_get_job_workbook_name(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_WORKBOOK) + job_id = "bb1aab79-db54-4e96-9dd3-461d8f081d08" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + job = self.server.jobs.get_by_id(job_id) + self.assertEqual(job.workbook_name, "Superstore") + + def test_get_job_datasource_name(self) -> None: + response_xml = read_xml_asset(GET_BY_ID_FAILED_XML) + job_id = "777bf7c4-421d-4b2c-a518-11b90187c545" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{job_id}", text=response_xml) + job = self.server.jobs.get_by_id(job_id) + self.assertEqual(job.datasource_name, "World Indicators") + + def test_background_job_str(self) -> None: + job = TSC.BackgroundJobItem( + "ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", datetime.now(), 1, "extractRefresh", "Failed" + ) + assert not str(job).startswith("< None: + self.server = TSC.Server("http://test", False) + self.server.version = "3.15" + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.linked_tasks.baseurl + + def test_parse_linked_task_flow_run(self): + xml = fromstring(GET_LINKED_TASKS.read_bytes()) + task_runs = LinkedTaskFlowRunItem._parse_element(xml, self.server.namespace) + assert 1 == len(task_runs) + task = task_runs[0] + assert task.flow_run_id == "e3d1fc25-5644-4e32-af35-58dcbd1dbd73" + assert task.flow_run_priority == 1 + assert task.flow_run_consecutive_failed_count == 3 + assert task.flow_run_task_type == "runFlow" + assert task.flow_id == "ab1231eb-b8ca-461e-a131-83f3c2b6a673" + assert task.flow_name == "flow-name" + + def test_parse_linked_task_step(self): + xml = fromstring(GET_LINKED_TASKS.read_bytes()) + steps = LinkedTaskStepItem.from_task_xml(xml, self.server.namespace) + assert 1 == len(steps) + step = steps[0] + assert step.id == "f554a4df-bb6f-4294-94ee-9a709ef9bda0" + assert step.stop_downstream_on_failure + assert step.step_number == 1 + assert 1 == len(step.task_details) + task = step.task_details[0] + assert task.flow_run_id == "e3d1fc25-5644-4e32-af35-58dcbd1dbd73" + assert task.flow_run_priority == 1 + assert task.flow_run_consecutive_failed_count == 3 + assert task.flow_run_task_type == "runFlow" + assert task.flow_id == "ab1231eb-b8ca-461e-a131-83f3c2b6a673" + assert task.flow_name == "flow-name" + + def test_parse_linked_task(self): + tasks = LinkedTaskItem.from_response(GET_LINKED_TASKS.read_bytes(), self.server.namespace) + assert 1 == len(tasks) + task = tasks[0] + assert task.id == "1b8211dc-51a8-45ce-a831-b5921708e03e" + assert task.num_steps == 1 + assert task.schedule is not None + assert task.schedule.id == "be077332-d01d-481b-b2f3-917e463d4dca" + + def test_get_linked_tasks(self): + with requests_mock.mock() as m: + m.get(self.baseurl, text=GET_LINKED_TASKS.read_text()) + tasks, pagination_item = self.server.linked_tasks.get() + + assert 1 == len(tasks) + task = tasks[0] + assert task.id == "1b8211dc-51a8-45ce-a831-b5921708e03e" + assert task.num_steps == 1 + assert task.schedule is not None + assert task.schedule.id == "be077332-d01d-481b-b2f3-917e463d4dca" + + def test_get_by_id_str_linked_task(self): + id_ = "1b8211dc-51a8-45ce-a831-b5921708e03e" + + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{id_}", text=GET_LINKED_TASKS.read_text()) + task = 
self.server.linked_tasks.get_by_id(id_) + + assert task.id == "1b8211dc-51a8-45ce-a831-b5921708e03e" + assert task.num_steps == 1 + assert task.schedule is not None + assert task.schedule.id == "be077332-d01d-481b-b2f3-917e463d4dca" + + def test_get_by_id_obj_linked_task(self): + id_ = "1b8211dc-51a8-45ce-a831-b5921708e03e" + in_task = LinkedTaskItem() + in_task.id = id_ + + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{id_}", text=GET_LINKED_TASKS.read_text()) + task = self.server.linked_tasks.get_by_id(in_task) + + assert task.id == "1b8211dc-51a8-45ce-a831-b5921708e03e" + assert task.num_steps == 1 + assert task.schedule is not None + assert task.schedule.id == "be077332-d01d-481b-b2f3-917e463d4dca" + + def test_run_now_str_linked_task(self): + id_ = "1b8211dc-51a8-45ce-a831-b5921708e03e" + + with requests_mock.mock() as m: + m.post(f"{self.baseurl}/{id_}/runNow", text=RUN_LINKED_TASK_NOW.read_text()) + job = self.server.linked_tasks.run_now(id_) + + assert job.id == "269a1e5a-1220-4a13-ac01-704982693dd8" + assert job.status == "InProgress" + assert job.created_at == parse_datetime("2022-02-15T00:22:22Z") + assert job.linked_task_id == id_ + + def test_run_now_obj_linked_task(self): + id_ = "1b8211dc-51a8-45ce-a831-b5921708e03e" + in_task = LinkedTaskItem() + in_task.id = id_ + + with requests_mock.mock() as m: + m.post(f"{self.baseurl}/{id_}/runNow", text=RUN_LINKED_TASK_NOW.read_text()) + job = self.server.linked_tasks.run_now(in_task) + + assert job.id == "269a1e5a-1220-4a13-ac01-704982693dd8" + assert job.status == "InProgress" + assert job.created_at == parse_datetime("2022-02-15T00:22:22Z") + assert job.linked_task_id == id_ diff --git a/test/test_metadata.py b/test/test_metadata.py new file mode 100644 index 000000000..1dc9cf1c6 --- /dev/null +++ b/test/test_metadata.py @@ -0,0 +1,102 @@ +import json +import os.path +import unittest + +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.server.endpoint.exceptions import GraphQLError + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +METADATA_QUERY_SUCCESS = os.path.join(TEST_ASSET_DIR, "metadata_query_success.json") +METADATA_QUERY_ERROR = os.path.join(TEST_ASSET_DIR, "metadata_query_error.json") +EXPECTED_PAGED_DICT = os.path.join(TEST_ASSET_DIR, "metadata_query_expected_dict.dict") + +METADATA_PAGE_1 = os.path.join(TEST_ASSET_DIR, "metadata_paged_1.json") +METADATA_PAGE_2 = os.path.join(TEST_ASSET_DIR, "metadata_paged_2.json") +METADATA_PAGE_3 = os.path.join(TEST_ASSET_DIR, "metadata_paged_3.json") + +EXPECTED_DICT = { + "publishedDatasources": [ + {"id": "01cf92b2-2d17-b656-fc48-5c25ef6d5352", "name": "Batters (TestV1)"}, + {"id": "020ae1cd-c356-f1ad-a846-b0094850d22a", "name": "SharePoint_List_sharepoint2010.test.tsi.lan"}, + {"id": "061493a0-c3b2-6f39-d08c-bc3f842b44af", "name": "Batters_mongodb"}, + {"id": "089fe515-ad2f-89bc-94bd-69f55f69a9c2", "name": "Sample - Superstore"}, + ] +} + +EXPECTED_DICT_ERROR = [{"message": "Reached time limit of PT5S for query execution.", "path": None, "extensions": None}] + + +class MetadataTests(unittest.TestCase): + def setUp(self): + self.server = TSC.Server("http://test", False) + self.baseurl = self.server.metadata.baseurl + self.server.version = "3.5" + + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + def test_metadata_query(self): + with open(METADATA_QUERY_SUCCESS, "rb") as f: + response_json = json.loads(f.read().decode()) + with 
requests_mock.mock() as m: + m.post(self.baseurl, json=response_json) + actual = self.server.metadata.query("fake query") + + datasources = actual["data"] + + self.assertDictEqual(EXPECTED_DICT, datasources) + + def test_paged_metadata_query(self): + with open(EXPECTED_PAGED_DICT, "rb") as f: + expected = eval(f.read()) + + # prepare the 3 pages of results + with open(METADATA_PAGE_1, "rb") as f: + result_1 = f.read().decode() + with open(METADATA_PAGE_2, "rb") as f: + result_2 = f.read().decode() + with open(METADATA_PAGE_3, "rb") as f: + result_3 = f.read().decode() + + with requests_mock.mock() as m: + m.post( + self.baseurl, + [ + {"text": result_1, "status_code": 200}, + {"text": result_2, "status_code": 200}, + {"text": result_3, "status_code": 200}, + ], + ) + + # validation checks for endCursor and hasNextPage, + # but the query text doesn't matter for the test + actual = self.server.metadata.paginated_query( + "fake query endCursor hasNextPage", variables={"first": 1, "afterToken": None} + ) + + self.assertDictEqual(expected, actual) + + def test_metadata_query_ignore_error(self): + with open(METADATA_QUERY_ERROR, "rb") as f: + response_json = json.loads(f.read().decode()) + with requests_mock.mock() as m: + m.post(self.baseurl, json=response_json) + actual = self.server.metadata.query("fake query") + datasources = actual["data"] + + self.assertNotEqual(actual.get("errors", None), None) + self.assertListEqual(EXPECTED_DICT_ERROR, actual["errors"]) + self.assertDictEqual(EXPECTED_DICT, datasources) + + def test_metadata_query_abort_on_error(self): + with open(METADATA_QUERY_ERROR, "rb") as f: + response_json = json.loads(f.read().decode()) + with requests_mock.mock() as m: + m.post(self.baseurl, json=response_json) + + with self.assertRaises(GraphQLError) as e: + self.server.metadata.query("fake query", abort_on_error=True) + self.assertListEqual(e.error, EXPECTED_DICT_ERROR) diff --git a/test/test_metrics.py b/test/test_metrics.py new file mode 100644 index 000000000..7628abb1a --- /dev/null +++ b/test/test_metrics.py @@ -0,0 +1,105 @@ +import unittest +import requests_mock +from pathlib import Path + +import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import format_datetime + +assets = Path(__file__).parent / "assets" +METRICS_GET = assets / "metrics_get.xml" +METRICS_GET_BY_ID = assets / "metrics_get_by_id.xml" +METRICS_UPDATE = assets / "metrics_update.xml" + + +class TestMetrics(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.9" + + self.baseurl = self.server.metrics.baseurl + + def test_metrics_get(self) -> None: + with requests_mock.mock() as m: + m.get(self.baseurl, text=METRICS_GET.read_text()) + all_metrics, pagination_item = self.server.metrics.get() + + self.assertEqual(len(all_metrics), 2) + self.assertEqual(pagination_item.total_available, 27) + self.assertEqual(all_metrics[0].id, "6561daa3-20e8-407f-ba09-709b178c0b4a") + self.assertEqual(all_metrics[0].name, "Example metric") + self.assertEqual(all_metrics[0].description, "Description of my metric.") + self.assertEqual(all_metrics[0].webpage_url, "https://test/#/site/site-name/metrics/3") + self.assertEqual(format_datetime(all_metrics[0].created_at), "2020-01-02T01:02:03Z") + self.assertEqual(format_datetime(all_metrics[0].updated_at), "2020-01-02T01:02:03Z") + 
self.assertEqual(all_metrics[0].suspended, True) + self.assertEqual(all_metrics[0].project_id, "32e79edb-6cfd-47dc-ad79-e8ec2fbb1d33") + self.assertEqual(all_metrics[0].project_name, "Default") + self.assertEqual(all_metrics[0].owner_id, "32e79edb-6cfd-47dc-ad79-e8ec2fbb1d33") + self.assertEqual(all_metrics[0].view_id, "29dae0cd-1862-4a20-a638-e2c2dfa682d4") + self.assertEqual(len(all_metrics[0].tags), 0) + + self.assertEqual(all_metrics[1].id, "721760d9-0aa4-4029-87ae-371c956cea07") + self.assertEqual(all_metrics[1].name, "Another Example metric") + self.assertEqual(all_metrics[1].description, "Description of another metric.") + self.assertEqual(all_metrics[1].webpage_url, "https://test/#/site/site-name/metrics/4") + self.assertEqual(format_datetime(all_metrics[1].created_at), "2020-01-03T01:02:03Z") + self.assertEqual(format_datetime(all_metrics[1].updated_at), "2020-01-04T01:02:03Z") + self.assertEqual(all_metrics[1].suspended, False) + self.assertEqual(all_metrics[1].project_id, "486e0de0-2258-45bd-99cf-b62013e19f4e") + self.assertEqual(all_metrics[1].project_name, "Assets") + self.assertEqual(all_metrics[1].owner_id, "1bbbc2b9-847d-443c-9a1f-dbcf112b8814") + self.assertEqual(all_metrics[1].view_id, "7dbfdb63-a6ca-4723-93ee-4fefc71992d3") + self.assertEqual(len(all_metrics[1].tags), 2) + self.assertIn("Test", all_metrics[1].tags) + self.assertIn("Asset", all_metrics[1].tags) + + def test_metrics_get_by_id(self) -> None: + luid = "6561daa3-20e8-407f-ba09-709b178c0b4a" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{luid}", text=METRICS_GET_BY_ID.read_text()) + metric = self.server.metrics.get_by_id(luid) + + self.assertEqual(metric.id, "6561daa3-20e8-407f-ba09-709b178c0b4a") + self.assertEqual(metric.name, "Example metric") + self.assertEqual(metric.description, "Description of my metric.") + self.assertEqual(metric.webpage_url, "https://test/#/site/site-name/metrics/3") + self.assertEqual(format_datetime(metric.created_at), "2020-01-02T01:02:03Z") + self.assertEqual(format_datetime(metric.updated_at), "2020-01-02T01:02:03Z") + self.assertEqual(metric.suspended, True) + self.assertEqual(metric.project_id, "32e79edb-6cfd-47dc-ad79-e8ec2fbb1d33") + self.assertEqual(metric.project_name, "Default") + self.assertEqual(metric.owner_id, "32e79edb-6cfd-47dc-ad79-e8ec2fbb1d33") + self.assertEqual(metric.view_id, "29dae0cd-1862-4a20-a638-e2c2dfa682d4") + self.assertEqual(len(metric.tags), 0) + + def test_metrics_delete(self) -> None: + luid = "6561daa3-20e8-407f-ba09-709b178c0b4a" + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{luid}") + self.server.metrics.delete(luid) + + def test_metrics_update(self) -> None: + luid = "6561daa3-20e8-407f-ba09-709b178c0b4a" + metric = TSC.MetricItem() + metric._id = luid + + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{luid}", text=METRICS_UPDATE.read_text()) + metric = self.server.metrics.update(metric) + + self.assertEqual(metric.id, "6561daa3-20e8-407f-ba09-709b178c0b4a") + self.assertEqual(metric.name, "Example metric") + self.assertEqual(metric.description, "Description of my metric.") + self.assertEqual(metric.webpage_url, "https://test/#/site/site-name/metrics/3") + self.assertEqual(format_datetime(metric.created_at), "2020-01-02T01:02:03Z") + self.assertEqual(format_datetime(metric.updated_at), "2020-01-02T01:02:03Z") + self.assertEqual(metric.suspended, True) + self.assertEqual(metric.project_id, "32e79edb-6cfd-47dc-ad79-e8ec2fbb1d33") + self.assertEqual(metric.project_name, "Default") + 
self.assertEqual(metric.owner_id, "32e79edb-6cfd-47dc-ad79-e8ec2fbb1d33") + self.assertEqual(metric.view_id, "29dae0cd-1862-4a20-a638-e2c2dfa682d4") + self.assertEqual(len(metric.tags), 0) diff --git a/test/test_pager.py b/test/test_pager.py index 52089180d..1836095bb 100644 --- a/test/test_pager.py +++ b/test/test_pager.py @@ -1,32 +1,49 @@ -import unittest +import contextlib import os +import unittest +import xml.etree.ElementTree as ET + import requests_mock + import tableauserverclient as TSC +from tableauserverclient.config import config -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") -GET_XML_PAGE1 = os.path.join(TEST_ASSET_DIR, 'workbook_get_page_1.xml') -GET_XML_PAGE2 = os.path.join(TEST_ASSET_DIR, 'workbook_get_page_2.xml') -GET_XML_PAGE3 = os.path.join(TEST_ASSET_DIR, 'workbook_get_page_3.xml') +GET_VIEW_XML = os.path.join(TEST_ASSET_DIR, "view_get.xml") +GET_XML_PAGE1 = os.path.join(TEST_ASSET_DIR, "workbook_get_page_1.xml") +GET_XML_PAGE2 = os.path.join(TEST_ASSET_DIR, "workbook_get_page_2.xml") +GET_XML_PAGE3 = os.path.join(TEST_ASSET_DIR, "workbook_get_page_3.xml") + + +@contextlib.contextmanager +def set_env(**environ): + old_environ = dict(os.environ) + os.environ.update(environ) + try: + yield + finally: + os.environ.clear() + os.environ.update(old_environ) class PagerTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server('http://test') + self.server = TSC.Server("http://test", False) # Fake sign in - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.workbooks.baseurl - def test_pager_with_no_options(self): - with open(GET_XML_PAGE1, 'rb') as f: - page_1 = f.read().decode('utf-8') - with open(GET_XML_PAGE2, 'rb') as f: - page_2 = f.read().decode('utf-8') - with open(GET_XML_PAGE3, 'rb') as f: - page_3 = f.read().decode('utf-8') + def test_pager_with_no_options(self) -> None: + with open(GET_XML_PAGE1, "rb") as f: + page_1 = f.read().decode("utf-8") + with open(GET_XML_PAGE2, "rb") as f: + page_2 = f.read().decode("utf-8") + with open(GET_XML_PAGE3, "rb") as f: + page_3 = f.read().decode("utf-8") with requests_mock.mock() as m: # Register Pager with default request options m.get(self.baseurl, text=page_1) @@ -42,17 +59,17 @@ def test_pager_with_no_options(self): # Let's check that workbook items aren't duplicates wb1, wb2, wb3 = workbooks - self.assertEqual(wb1.name, 'Page1Workbook') - self.assertEqual(wb2.name, 'Page2Workbook') - self.assertEqual(wb3.name, 'Page3Workbook') - - def test_pager_with_options(self): - with open(GET_XML_PAGE1, 'rb') as f: - page_1 = f.read().decode('utf-8') - with open(GET_XML_PAGE2, 'rb') as f: - page_2 = f.read().decode('utf-8') - with open(GET_XML_PAGE3, 'rb') as f: - page_3 = f.read().decode('utf-8') + self.assertEqual(wb1.name, "Page1Workbook") + self.assertEqual(wb2.name, "Page2Workbook") + self.assertEqual(wb3.name, "Page3Workbook") + + def test_pager_with_options(self) -> None: + with open(GET_XML_PAGE1, "rb") as f: + page_1 = f.read().decode("utf-8") + with open(GET_XML_PAGE2, "rb") as f: + page_2 = f.read().decode("utf-8") + with open(GET_XML_PAGE3, "rb") as f: + page_3 = f.read().decode("utf-8") with requests_mock.mock() as m: # Register Pager with some pages m.get(self.baseurl + "?pageNumber=1&pageSize=1", 
complete_qs=True, text=page_1) @@ -67,17 +84,17 @@ def test_pager_with_options(self): # Check that the workbooks are the 2 we think they should be wb2, wb3 = workbooks - self.assertEqual(wb2.name, 'Page2Workbook') - self.assertEqual(wb3.name, 'Page3Workbook') + self.assertEqual(wb2.name, "Page2Workbook") + self.assertEqual(wb3.name, "Page3Workbook") # Starting on 1 with pagesize of 3 should get all 3 opts = TSC.RequestOptions(1, 3) workbooks = list(TSC.Pager(self.server.workbooks, opts)) self.assertTrue(len(workbooks) == 3) wb1, wb2, wb3 = workbooks - self.assertEqual(wb1.name, 'Page1Workbook') - self.assertEqual(wb2.name, 'Page2Workbook') - self.assertEqual(wb3.name, 'Page3Workbook') + self.assertEqual(wb1.name, "Page1Workbook") + self.assertEqual(wb2.name, "Page2Workbook") + self.assertEqual(wb3.name, "Page3Workbook") # Starting on 3 with pagesize of 1 should get the last item opts = TSC.RequestOptions(3, 1) @@ -85,4 +102,35 @@ def test_pager_with_options(self): self.assertTrue(len(workbooks) == 1) # Should have the last workbook wb3 = workbooks.pop() - self.assertEqual(wb3.name, 'Page3Workbook') + self.assertEqual(wb3.name, "Page3Workbook") + + def test_pager_with_env_var(self) -> None: + with set_env(TSC_PAGE_SIZE="1000"): + assert config.PAGE_SIZE == 1000 + loop = TSC.Pager(self.server.workbooks) + assert loop._options.pagesize == 1000 + + def test_queryset_with_env_var(self) -> None: + with set_env(TSC_PAGE_SIZE="1000"): + assert config.PAGE_SIZE == 1000 + loop = self.server.workbooks.all() + assert loop.request_options.pagesize == 1000 + + def test_pager_view(self) -> None: + with open(GET_VIEW_XML, "rb") as f: + view_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.server.views.baseurl, text=view_xml) + for view in TSC.Pager(self.server.views): + assert view.name is not None + + def test_queryset_no_matches(self) -> None: + elem = ET.Element("tsResponse", xmlns="http://tableau.com/api") + ET.SubElement(elem, "pagination", totalAvailable="0") + ET.SubElement(elem, "groups") + xml = ET.tostring(elem).decode("utf-8") + with requests_mock.mock() as m: + m.get(self.server.groups.baseurl, text=xml) + all_groups = self.server.groups.all() + groups = list(all_groups) + assert len(groups) == 0 diff --git a/test/test_permissionsrule.py b/test/test_permissionsrule.py new file mode 100644 index 000000000..d7bceb258 --- /dev/null +++ b/test/test_permissionsrule.py @@ -0,0 +1,104 @@ +import unittest + +import tableauserverclient as TSC +from tableauserverclient.models.reference_item import ResourceReference + + +class TestPermissionsRules(unittest.TestCase): + def test_and(self): + grantee = ResourceReference("a", "user") + rule1 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + rule2 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + + composite = rule1 & rule2 + + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.ExportData), TSC.Permission.Mode.Allow) + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.Delete), TSC.Permission.Mode.Deny) + 
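# ViewComments is set only in rule1, so the & composite drops it entirely + 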
self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.ViewComments), None) + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.ExportXml), TSC.Permission.Mode.Deny) + + def test_or(self): + grantee = ResourceReference("a", "user") + rule1 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + rule2 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + + composite = rule1 | rule2 + + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.ExportData), TSC.Permission.Mode.Allow) + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.Delete), TSC.Permission.Mode.Allow) + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.ViewComments), TSC.Permission.Mode.Allow) + self.assertEqual(composite.capabilities.get(TSC.Permission.Capability.ExportXml), TSC.Permission.Mode.Deny) + + def test_eq_false(self): + grantee = ResourceReference("a", "user") + rule1 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + rule2 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + + self.assertNotEqual(rule1, rule2) + + def test_eq_true(self): + grantee = ResourceReference("a", "user") + rule1 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + rule2 = TSC.PermissionsRule( + grantee, + { + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny, + }, + ) + self.assertEqual(rule1, rule2) diff --git a/test/test_project.py b/test/test_project.py index 1c86c3b5c..a80d4919c 100644 --- a/test/test_project.py +++ b/test/test_project.py @@ -1,107 +1,309 @@ -import unittest import os +import unittest + import requests_mock + import tableauserverclient as TSC from tableauserverclient.datetime_helpers import format_datetime +from tableauserverclient import GroupItem +from ._utils import read_xml_asset, asset -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") -GET_XML = os.path.join(TEST_ASSET_DIR, 'project_get.xml') -UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'project_update.xml') -CREATE_XML = os.path.join(TEST_ASSET_DIR, 'project_create.xml') +GET_XML = asset("project_get.xml") +UPDATE_XML = asset("project_update.xml") 
+SET_CONTENT_PERMISSIONS_XML = asset("project_content_permission.xml") +CREATE_XML = asset("project_create.xml") +POPULATE_PERMISSIONS_XML = "project_populate_permissions.xml" +POPULATE_WORKBOOK_DEFAULT_PERMISSIONS_XML = "project_populate_workbook_default_permissions.xml" +UPDATE_DATASOURCE_DEFAULT_PERMISSIONS_XML = "project_update_datasource_default_permissions.xml" class ProjectTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) # Fake signin - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.projects.baseurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_projects, pagination_item = self.server.projects.get() - self.assertEqual(2, pagination_item.total_available) - pr1 = all_projects[0] - pr2 = all_projects[1] - - self.assertEqual('bdd975c6-4042-11e9-a712-975dc31937aa', pr1.id) - self.assertEqual('Default', pr1.name) - self.assertEqual('The default project that was automatically created by Tableau.', pr1.description) - self.assertEqual(True, pr1.top_level_project) - self.assertEqual('2019-03-06T19:04:57Z', format_datetime(pr1.created_at)) - self.assertEqual('2019-03-06T19:04:58Z', format_datetime(pr1.updated_at)) - self.assertEqual('ManagedByOwner', pr1.content_permissions) - self.assertEqual('f9e32d4b-ca36-43bb-bc58-29ad45b10be5', pr1.owner_id) - self.assertEqual('_system', pr1.owner_name) - - self.assertEqual('7e593a18-c6e2-469c-9aca-4b2782693777', pr2.id) - self.assertEqual('update', pr2.name) - self.assertEqual('upd', pr2.description) - self.assertEqual(False, pr2.top_level_project) - self.assertEqual('bdd975c6-4042-11e9-a712-975dc31937aa', pr2.parent_id) - self.assertEqual('2019-03-13T22:18:18Z', format_datetime(pr2.created_at)) - self.assertEqual('2019-03-14T17:13:40Z', format_datetime(pr2.updated_at)) - self.assertEqual('ManagedByOwner', pr2.content_permissions) - self.assertEqual('344356bd-a847-4d6c-8370-8b2821498cdb', pr2.owner_id) - self.assertEqual('testadmin', pr2.owner_name) - - def test_get_before_signin(self): + self.assertEqual(3, pagination_item.total_available) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", all_projects[0].id) + self.assertEqual("default", all_projects[0].name) + self.assertEqual("The default project that was automatically created by Tableau.", all_projects[0].description) + self.assertEqual("ManagedByOwner", all_projects[0].content_permissions) + self.assertEqual(None, all_projects[0].parent_id) + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", all_projects[0].owner_id) + self.assertEqual("_system", all_projects[0].owner_name) + + self.assertEqual("1d0304cd-3796-429f-b815-7258370b9b74", all_projects[1].id) + self.assertEqual("Tableau", all_projects[1].name) + self.assertEqual("ManagedByOwner", all_projects[1].content_permissions) + self.assertEqual(None, all_projects[1].parent_id) + self.assertEqual("2a47bbf8-8900-4ebb-b0a4-2723bd7c46c3", all_projects[1].owner_id) + + 
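# the third project in the fixture is a child of the "Tableau" project above + 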
self.assertEqual("4cc52973-5e3a-4d1f-a4fb-5b5f73796edf", all_projects[2].id) + self.assertEqual("Tableau > Child 1", all_projects[2].name) + self.assertEqual("ManagedByOwner", all_projects[2].content_permissions) + self.assertEqual("1d0304cd-3796-429f-b815-7258370b9b74", all_projects[2].parent_id) + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", all_projects[2].owner_id) + self.assertEqual('testadmin', all_projects[2].owner_name) + + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.projects.get) - def test_delete(self): + def test_delete(self) -> None: with requests_mock.mock() as m: - m.delete(self.baseurl + '/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', status_code=204) - self.server.projects.delete('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + m.delete(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", status_code=204) + self.server.projects.delete("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") - def test_delete_missing_id(self): - self.assertRaises(ValueError, self.server.projects.delete, '') + def test_delete_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.projects.delete, "") - def test_update(self): - with open(UPDATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_update(self) -> None: + with open(UPDATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/1d0304cd-3796-429f-b815-7258370b9b74', text=response_xml) - single_project = TSC.ProjectItem(name='Test Project', - content_permissions='LockedToProject', - description='Project created for testing', - parent_id='9a8f2265-70f3-4494-96c5-e5949d7a1120') - single_project._id = '1d0304cd-3796-429f-b815-7258370b9b74' + m.put(self.baseurl + "/1d0304cd-3796-429f-b815-7258370b9b74", text=response_xml) + single_project = TSC.ProjectItem( + name="Test Project", + content_permissions="LockedToProject", + description="Project created for testing", + parent_id="9a8f2265-70f3-4494-96c5-e5949d7a1120", + ) + single_project._id = "1d0304cd-3796-429f-b815-7258370b9b74" + single_project.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" single_project = self.server.projects.update(single_project) - self.assertEqual('1d0304cd-3796-429f-b815-7258370b9b74', single_project.id) - self.assertEqual('Test Project', single_project.name) - self.assertEqual('Project created for testing', single_project.description) - self.assertEqual('LockedToProject', single_project.content_permissions) - self.assertEqual('9a8f2265-70f3-4494-96c5-e5949d7a1120', single_project.parent_id) + self.assertEqual("1d0304cd-3796-429f-b815-7258370b9b74", single_project.id) + self.assertEqual("Test Project", single_project.name) + self.assertEqual("Project created for testing", single_project.description) + self.assertEqual("LockedToProject", single_project.content_permissions) + self.assertEqual("9a8f2265-70f3-4494-96c5-e5949d7a1120", single_project.parent_id) + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", single_project.owner_id) + + def test_content_permission_locked_to_project_without_nested(self) -> None: + with open(SET_CONTENT_PERMISSIONS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/cb3759e5-da4a-4ade-b916-7e2b4ea7ec86", text=response_xml) + project_item = TSC.ProjectItem( + name="Test Project Permissions", + content_permissions="LockedToProjectWithoutNested", + description="Project created for testing", + 
parent_id="7687bc43-a543-42f3-b86f-80caed03a813", + ) + project_item._id = "cb3759e5-da4a-4ade-b916-7e2b4ea7ec86" + project_item = self.server.projects.update(project_item) + self.assertEqual("cb3759e5-da4a-4ade-b916-7e2b4ea7ec86", project_item.id) + self.assertEqual("Test Project Permissions", project_item.name) + self.assertEqual("Project created for testing", project_item.description) + self.assertEqual("LockedToProjectWithoutNested", project_item.content_permissions) + self.assertEqual("7687bc43-a543-42f3-b86f-80caed03a813", project_item.parent_id) + + def test_update_datasource_default_permission(self) -> None: + response_xml = read_xml_asset(UPDATE_DATASOURCE_DEFAULT_PERMISSIONS_XML) + with requests_mock.mock() as m: + m.put( + self.baseurl + "/b4065286-80f0-11ea-af1b-cb7191f48e45/default-permissions/datasources", + text=response_xml, + ) + project = TSC.ProjectItem("test-project") + project._id = "b4065286-80f0-11ea-af1b-cb7191f48e45" + + group = TSC.GroupItem("test-group") + group._id = "b4488bce-80f0-11ea-af1c-976d0c1dab39" + + capabilities = {TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Deny} + + rules = [TSC.PermissionsRule(grantee=GroupItem.as_reference(group._id), capabilities=capabilities)] + + new_rules = self.server.projects.update_datasource_default_permissions(project, rules) + + self.assertEqual("b4488bce-80f0-11ea-af1c-976d0c1dab39", new_rules[0].grantee.id) + + updated_capabilities = new_rules[0].capabilities + self.assertEqual(4, len(updated_capabilities)) + self.assertEqual("Deny", updated_capabilities["ExportXml"]) + self.assertEqual("Allow", updated_capabilities["Read"]) + self.assertEqual("Allow", updated_capabilities["Write"]) + self.assertEqual("Allow", updated_capabilities["Connect"]) - def test_update_missing_id(self): - single_project = TSC.ProjectItem('test') + def test_update_missing_id(self) -> None: + single_project = TSC.ProjectItem("test") self.assertRaises(TSC.MissingRequiredFieldError, self.server.projects.update, single_project) - def test_create(self): - with open(CREATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_create(self) -> None: + with open(CREATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - new_project = TSC.ProjectItem(name='Test Project', description='Project created for testing') - new_project.content_permissions = 'ManagedByOwner' - new_project.parent_id = '9a8f2265-70f3-4494-96c5-e5949d7a1120' + new_project = TSC.ProjectItem(name="Test Project", description="Project created for testing") + new_project.content_permissions = "ManagedByOwner" + new_project.parent_id = "9a8f2265-70f3-4494-96c5-e5949d7a1120" new_project = self.server.projects.create(new_project) - self.assertEqual('ccbea03f-77c4-4209-8774-f67bc59c3cef', new_project.id) - self.assertEqual('Test Project', new_project.name) - self.assertEqual('Project created for testing', new_project.description) - self.assertEqual('ManagedByOwner', new_project.content_permissions) - self.assertEqual('9a8f2265-70f3-4494-96c5-e5949d7a1120', new_project.parent_id) + self.assertEqual("ccbea03f-77c4-4209-8774-f67bc59c3cef", new_project.id) + self.assertEqual("Test Project", new_project.name) + self.assertEqual("Project created for testing", new_project.description) + self.assertEqual("ManagedByOwner", new_project.content_permissions) + self.assertEqual("9a8f2265-70f3-4494-96c5-e5949d7a1120", new_project.parent_id) + + def test_create_missing_name(self) -> None: + 
TSC.ProjectItem() + + def test_populate_permissions(self) -> None: + with open(asset(POPULATE_PERMISSIONS_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/permissions", text=response_xml) + single_project = TSC.ProjectItem("Project3") + single_project._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5" + + self.server.projects.populate_permissions(single_project) + permissions = single_project.permissions + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "c8f2773a-c83a-11e8-8c8f-33e6d787b506") + self.assertDictEqual( + permissions[0].capabilities, + { + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + }, + ) + + def test_populate_workbooks(self) -> None: + response_xml = read_xml_asset(POPULATE_WORKBOOK_DEFAULT_PERMISSIONS_XML) + with requests_mock.mock() as m: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/default-permissions/workbooks", text=response_xml + ) + single_project = TSC.ProjectItem("test", "1d0304cd-3796-429f-b815-7258370b9b74") + single_project.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_project._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + + self.server.projects.populate_workbook_default_permissions(single_project) + permissions = single_project.default_workbook_permissions + + rule1 = permissions.pop() + + self.assertEqual("c8f2773a-c83a-11e8-8c8f-33e6d787b506", rule1.grantee.id) + self.assertEqual("group", rule1.grantee.tag_name) + self.assertDictEqual( + rule1.capabilities, + { + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Filter: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ChangePermissions: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.WebAuthoring: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ShareView: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ViewUnderlyingData: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ChangeHierarchy: TSC.Permission.Mode.Allow, + }, + ) + + def test_delete_permission(self) -> None: + with open(asset(POPULATE_PERMISSIONS_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/permissions", text=response_xml) + + single_group = TSC.GroupItem("Group1") + single_group._id = "c8f2773a-c83a-11e8-8c8f-33e6d787b506" + + single_project = TSC.ProjectItem("Project3") + single_project._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5" + + self.server.projects.populate_permissions(single_project) + permissions = single_project.permissions + + capabilities = {} + + for permission in permissions: + if permission.grantee.tag_name == "group": + if permission.grantee.id == single_group._id: + capabilities = permission.capabilities + + rules = TSC.PermissionsRule(grantee=GroupItem.as_reference(single_group._id), capabilities=capabilities) + + endpoint = 
f"{single_project._id}/permissions/groups/{single_group._id}" + m.delete(f"{self.baseurl}/{endpoint}/Read/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/Write/Allow", status_code=204) + self.server.projects.delete_permission(item=single_project, rules=rules) + + def test_delete_workbook_default_permission(self) -> None: + with open(asset(POPULATE_WORKBOOK_DEFAULT_PERMISSIONS_XML), "rb") as f: + response_xml = f.read().decode("utf-8") + + with requests_mock.mock() as m: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/default-permissions/workbooks", text=response_xml + ) + + single_group = TSC.GroupItem("Group1") + single_group._id = "c8f2773a-c83a-11e8-8c8f-33e6d787b506" + + single_project = TSC.ProjectItem("test", "1d0304cd-3796-429f-b815-7258370b9b74") + single_project._owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_project._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb" + + self.server.projects.populate_workbook_default_permissions(single_project) + permissions = single_project.default_workbook_permissions + + capabilities = { + # View + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow, + # Interact/Edit + TSC.Permission.Capability.Filter: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ViewUnderlyingData: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ShareView: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.WebAuthoring: TSC.Permission.Mode.Allow, + # Edit + TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportXml: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ChangeHierarchy: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Delete: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ChangePermissions: TSC.Permission.Mode.Allow, + } + + rules = TSC.PermissionsRule(grantee=GroupItem.as_reference(single_group._id), capabilities=capabilities) - def test_create_missing_name(self): - self.assertRaises(ValueError, TSC.ProjectItem, '') + endpoint = f"{single_project._id}/default-permissions/workbooks/groups/{single_group._id}" + m.delete(f"{self.baseurl}/{endpoint}/Read/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ExportImage/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ExportData/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ViewComments/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/AddComment/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/Filter/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ViewUnderlyingData/Deny", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ShareView/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/WebAuthoring/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/Write/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ExportXml/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ChangeHierarchy/Allow", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/Delete/Deny", status_code=204) + m.delete(f"{self.baseurl}/{endpoint}/ChangePermissions/Allow", status_code=204) + self.server.projects.delete_workbook_default_permissions(item=single_project, rule=rules) diff --git a/test/test_project_model.py 
b/test/test_project_model.py index 56e6c3d11..ecfe1bd14 100644 --- a/test/test_project_model.py +++ b/test/test_project_model.py @@ -1,17 +1,14 @@ import unittest + import tableauserverclient as TSC class ProjectModelTests(unittest.TestCase): - def test_invalid_name(self): - self.assertRaises(ValueError, TSC.ProjectItem, None) - self.assertRaises(ValueError, TSC.ProjectItem, "") + def test_nullable_name(self): + TSC.ProjectItem(None) + TSC.ProjectItem("") project = TSC.ProjectItem("proj") - with self.assertRaises(ValueError): - project.name = None - - with self.assertRaises(ValueError): - project.name = "" + project.name = None def test_invalid_content_permissions(self): project = TSC.ProjectItem("proj") diff --git a/test/test_regression_tests.py b/test/test_regression_tests.py index 8958c3cf8..62e301591 100644 --- a/test/test_regression_tests.py +++ b/test/test_regression_tests.py @@ -1,23 +1,83 @@ import unittest +from unittest import mock + import tableauserverclient.server.request_factory as factory -from tableauserverclient.server.endpoint import Endpoint +from tableauserverclient.helpers.strings import redact_xml +from tableauserverclient.filesys_helpers import to_filename, make_download_path class BugFix257(unittest.TestCase): def test_empty_request_works(self): result = factory.EmptyRequest().empty_req() - self.assertEqual(b'', result) + self.assertEqual(b"", result) + + +class FileSysHelpers(unittest.TestCase): + def test_to_filename(self): + invalid = [ + "23brhafbjrjhkbbea.txt", + "a_b_C.txt", + "windows space.txt", + "abc#def.txt", + "t@bL3A()", + ] + + valid = [ + "23brhafbjrjhkbbea.txt", + "a_b_C.txt", + "windows space.txt", + "abcdef.txt", + "tbL3A", + ] + + self.assertTrue(all([(to_filename(i) == v) for i, v in zip(invalid, valid)])) + + def test_make_download_path(self): + no_file_path = (None, "file.ext") + has_file_path_folder = ("/root/folder/", "file.ext") + has_file_path_file = ("outx", "file.ext") + + self.assertEqual("file.ext", make_download_path(*no_file_path)) + self.assertEqual("outx.ext", make_download_path(*has_file_path_file)) + + with mock.patch("os.path.isdir") as mocked_isdir: + mocked_isdir.return_value = True + self.assertEqual("/root/folder/file.ext", make_download_path(*has_file_path_folder)) -class BugFix273(unittest.TestCase): - def test_binary_log_truncated(self): +class LoggingTest(unittest.TestCase): + def test_redact_password_string(self): + redacted = redact_xml( + "this is password: my_super_secret_passphrase_which_nobody_should_ever_see password: value" + ) + assert redacted.find("value") == -1 + assert redacted.find("secret") == -1 + assert redacted.find("ever_see") == -1 + assert redacted.find("my_super_secret_passphrase_which_nobody_should_ever_see") == -1 - class FakeResponse(object): + def test_redact_password_bytes(self): + redacted = redact_xml( + b"" + ) + assert redacted.find(b"value") == -1 + assert redacted.find(b"secret") == -1 - headers = {'Content-Type': 'application/octet-stream'} - content = b'\x1337' * 1000 - status_code = 200 + def test_redact_password_with_special_char(self): + redacted = redact_xml( + " " + ) + assert redacted.find("my_s per_secre>_passphrase_which_nobody_should_ever_see with password: value") == -1 - server_response = FakeResponse() + def test_redact_password_not_xml(self): + redacted = redact_xml( + " " + ) + assert redacted.find("my_s per_secre>_passphrase_which_nobody_should_ever_see") == -1 - self.assertEqual(Endpoint._safe_to_log(server_response), '[Truncated File Contents]') + def 
test_redact_password_really_not_xml(self): + redacted = redact_xml( + "value='this is a nondescript text line which is public' password='my_s per_secre>_passphrase_which_nobody_should_ever_see with password: value and then a cookie " + ) + assert redacted.find("my_s per_secre>_passphrase_which_nobody_should_ever_see") == -1 + assert redacted.find("passphrase") == -1, redacted + assert redacted.find("cookie") == -1, redacted diff --git a/test/test_request_option.py b/test/test_request_option.py index c5afcc3b2..7405189a3 100644 --- a/test/test_request_option.py +++ b/test/test_request_option.py @@ -1,33 +1,43 @@ -import unittest import os +from pathlib import Path +import re +import unittest +from urllib.parse import parse_qs + import requests_mock + import tableauserverclient as TSC -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = Path(__file__).parent / "assets" -PAGINATION_XML = os.path.join(TEST_ASSET_DIR, 'request_option_pagination.xml') -PAGE_NUMBER_XML = os.path.join(TEST_ASSET_DIR, 'request_option_page_number.xml') -PAGE_SIZE_XML = os.path.join(TEST_ASSET_DIR, 'request_option_page_size.xml') -FILTER_EQUALS = os.path.join(TEST_ASSET_DIR, 'request_option_filter_equals.xml') -FILTER_TAGS_IN = os.path.join(TEST_ASSET_DIR, 'request_option_filter_tags_in.xml') -FILTER_MULTIPLE = os.path.join(TEST_ASSET_DIR, 'request_option_filter_tags_in.xml') +PAGINATION_XML = os.path.join(TEST_ASSET_DIR, "request_option_pagination.xml") +PAGE_NUMBER_XML = os.path.join(TEST_ASSET_DIR, "request_option_page_number.xml") +PAGE_SIZE_XML = os.path.join(TEST_ASSET_DIR, "request_option_page_size.xml") +FILTER_EQUALS = os.path.join(TEST_ASSET_DIR, "request_option_filter_equals.xml") +FILTER_NAME_IN = os.path.join(TEST_ASSET_DIR, "request_option_filter_name_in.xml") +FILTER_TAGS_IN = os.path.join(TEST_ASSET_DIR, "request_option_filter_tags_in.xml") +FILTER_MULTIPLE = os.path.join(TEST_ASSET_DIR, "request_option_filter_tags_in.xml") +SLICING_QUERYSET = os.path.join(TEST_ASSET_DIR, "request_option_slicing_queryset.xml") +SLICING_QUERYSET_PAGE_1 = TEST_ASSET_DIR / "queryset_slicing_page_1.xml" +SLICING_QUERYSET_PAGE_2 = TEST_ASSET_DIR / "queryset_slicing_page_2.xml" class RequestOptionTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False, http_options={"timeout": 5}) # Fake signin - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server.version = "3.10" + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" - self.baseurl = '{0}/{1}'.format(self.server.sites.baseurl, self.server._site_id) + self.baseurl = f"{self.server.sites.baseurl}/{self.server._site_id}" - def test_pagination(self): - with open(PAGINATION_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_pagination(self) -> None: + with open(PAGINATION_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/views?pageNumber=1&pageSize=10', text=response_xml) + m.get(self.baseurl + "/views?pageNumber=1&pageSize=10", text=response_xml) req_option = TSC.RequestOptions().page_size(10) all_views, pagination_item = self.server.views.get(req_option) @@ -36,11 +46,11 @@ def test_pagination(self): self.assertEqual(33, pagination_item.total_available) self.assertEqual(10, len(all_views)) - def 
test_page_number(self): - with open(PAGE_NUMBER_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_page_number(self) -> None: + with open(PAGE_NUMBER_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/views?pageNumber=3', text=response_xml) + m.get(self.baseurl + "/views?pageNumber=3", text=response_xml) req_option = TSC.RequestOptions().page_number(3) all_views, pagination_item = self.server.views.get(req_option) @@ -49,11 +59,11 @@ def test_page_number(self): self.assertEqual(210, pagination_item.total_available) self.assertEqual(10, len(all_views)) - def test_page_size(self): - with open(PAGE_SIZE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_page_size(self) -> None: + with open(PAGE_SIZE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/views?pageSize=5', text=response_xml) + m.get(self.baseurl + "/views?pageSize=5", text=response_xml) req_option = TSC.RequestOptions().page_size(5) all_views, pagination_item = self.server.views.get(req_option) @@ -62,48 +72,299 @@ def test_page_size(self): self.assertEqual(33, pagination_item.total_available) self.assertEqual(5, len(all_views)) - def test_filter_equals(self): - with open(FILTER_EQUALS, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_filter_equals(self) -> None: + with open(FILTER_EQUALS, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/workbooks?filter=name:eq:RESTAPISample', text=response_xml) + m.get(self.baseurl + "/workbooks?filter=name:eq:RESTAPISample", text=response_xml) req_option = TSC.RequestOptions() - req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, - TSC.RequestOptions.Operator.Equals, 'RESTAPISample')) + req_option.filter.add( + TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.Equals, "RESTAPISample") + ) matching_workbooks, pagination_item = self.server.workbooks.get(req_option) self.assertEqual(2, pagination_item.total_available) - self.assertEqual('RESTAPISample', matching_workbooks[0].name) - self.assertEqual('RESTAPISample', matching_workbooks[1].name) + self.assertEqual("RESTAPISample", matching_workbooks[0].name) + self.assertEqual("RESTAPISample", matching_workbooks[1].name) - def test_filter_tags_in(self): - with open(FILTER_TAGS_IN, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_filter_equals_shorthand(self) -> None: + with open(FILTER_EQUALS, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/workbooks?filter=tags:in:[sample,safari,weather]', text=response_xml) + m.get(self.baseurl + "/workbooks?filter=name:eq:RESTAPISample", text=response_xml) + matching_workbooks = self.server.workbooks.filter(name="RESTAPISample").order_by("name") + + self.assertEqual(2, matching_workbooks.total_available) + self.assertEqual("RESTAPISample", matching_workbooks[0].name) + self.assertEqual("RESTAPISample", matching_workbooks[1].name) + + def test_filter_tags_in(self) -> None: + with open(FILTER_TAGS_IN, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/workbooks?filter=tags:in:[sample,safari,weather]", text=response_xml) req_option = TSC.RequestOptions() - req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, - ['sample', 'safari', 'weather'])) + 
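# match workbooks carrying any of the three tags via the In operator + 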
req_option.filter.add( + TSC.Filter( + TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, ["sample", "safari", "weather"] + ) + ) matching_workbooks, pagination_item = self.server.workbooks.get(req_option) self.assertEqual(3, pagination_item.total_available) - self.assertEqual(set(['weather']), matching_workbooks[0].tags) - self.assertEqual(set(['safari']), matching_workbooks[1].tags) - self.assertEqual(set(['sample']), matching_workbooks[2].tags) + self.assertEqual({"weather"}, matching_workbooks[0].tags) + self.assertEqual({"safari"}, matching_workbooks[1].tags) + self.assertEqual({"sample"}, matching_workbooks[2].tags) + + # check if filtered projects with spaces & special characters + # get correctly returned + def test_filter_name_in(self) -> None: + with open(FILTER_NAME_IN, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get( + self.baseurl + "/projects?filter=name%3Ain%3A%5Bdefault%2CSalesforce+Sales+Proje%C5%9Bt%5D", + text=response_xml, + ) + req_option = TSC.RequestOptions() + req_option.filter.add( + TSC.Filter( + TSC.RequestOptions.Field.Name, + TSC.RequestOptions.Operator.In, + ["default", "Salesforce Sales Projeśt"], + ) + ) + matching_projects, pagination_item = self.server.projects.get(req_option) + + self.assertEqual(2, pagination_item.total_available) + self.assertEqual("default", matching_projects[0].name) + self.assertEqual("Salesforce Sales Projeśt", matching_projects[1].name) + + def test_filter_tags_in_shorthand(self) -> None: + with open(FILTER_TAGS_IN, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/workbooks?filter=tags:in:[sample,safari,weather]", text=response_xml) + matching_workbooks = self.server.workbooks.filter(tags__in=["sample", "safari", "weather"]) + + self.assertEqual(3, matching_workbooks.total_available) + self.assertEqual({"weather"}, matching_workbooks[0].tags) + self.assertEqual({"safari"}, matching_workbooks[1].tags) + self.assertEqual({"sample"}, matching_workbooks[2].tags) - def test_multiple_filter_options(self): - with open(FILTER_MULTIPLE, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_invalid_shorthand_option(self) -> None: + with self.assertRaises(ValueError): + self.server.workbooks.filter(nonexistant__in=["sample", "safari"]) + + def test_multiple_filter_options(self) -> None: + with open(FILTER_MULTIPLE, "rb") as f: + response_xml = f.read().decode("utf-8") # To ensure that this is deterministic, run this a few times with requests_mock.mock() as m: # Sometimes pep8 requires you to do things you might not otherwise do - url = ''.join((self.baseurl, '/workbooks?pageNumber=1&pageSize=100&', - 'filter=name:eq:foo,tags:in:[sample,safari,weather]')) + url = "".join( + ( + self.baseurl, + "/workbooks?pageNumber=1&pageSize=100&", + "filter=name:eq:foo,tags:in:[sample,safari,weather]", + ) + ) m.get(url, text=response_xml) req_option = TSC.RequestOptions() - req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, - ['sample', 'safari', 'weather'])) - req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.Equals, 'foo')) - for _ in range(100): + req_option.filter.add( + TSC.Filter( + TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, ["sample", "safari", "weather"] + ) + ) + req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.Equals, "foo")) + for _ in range(5): matching_workbooks, 
pagination_item = self.server.workbooks.get(req_option) + self.assertEqual(3, pagination_item.total_available) + + # Test req_options if url already has query params + def test_double_query_params(self) -> None: + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views?queryParamExists=true" + opts = TSC.RequestOptions() + + opts.filter.add( + TSC.Filter(TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, ["stocks", "market"]) + ) + opts.sort.add(TSC.Sort(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Direction.Asc)) + + resp = self.server.workbooks.get_request(url, request_object=opts) + self.assertTrue(re.search("queryparamexists=true", resp.request.query)) + self.assertTrue(re.search("filter=tags%3ain%3a%5bstocks%2cmarket%5d", resp.request.query)) + self.assertTrue(re.search("sort=name%3aasc", resp.request.query)) + + # Test req_options for versions below 3.7 + def test_filter_sort_legacy(self) -> None: + self.server.version = "3.6" + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views?queryParamExists=true" + opts = TSC.RequestOptions() + + opts.filter.add( + TSC.Filter(TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, ["stocks", "market"]) + ) + opts.sort.add(TSC.Sort(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Direction.Asc)) + + resp = self.server.workbooks.get_request(url, request_object=opts) + self.assertTrue(re.search("queryparamexists=true", resp.request.query)) + self.assertTrue(re.search("filter=tags:in:%5bstocks,market%5d", resp.request.query)) + self.assertTrue(re.search("sort=name:asc", resp.request.query)) + + def test_vf(self) -> None: + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views/456/data" + opts = TSC.PDFRequestOptions() + opts.vf("name1#", "value1") + opts.vf("name2$", "value2") + opts.page_type = TSC.PDFRequestOptions.PageType.Tabloid + + resp = self.server.workbooks.get_request(url, request_object=opts) + self.assertTrue(re.search("vf_name1%23=value1", resp.request.query)) + self.assertTrue(re.search("vf_name2%24=value2", resp.request.query)) + self.assertTrue(re.search("type=tabloid", resp.request.query)) + + # Test req_options for versions below 3.7 + def test_vf_legacy(self) -> None: + self.server.version = "3.6" + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views/456/data" + opts = TSC.PDFRequestOptions() + opts.vf("name1@", "value1") + opts.vf("name2$", "value2") + opts.page_type = TSC.PDFRequestOptions.PageType.Tabloid + + resp = self.server.workbooks.get_request(url, request_object=opts) + self.assertTrue(re.search("vf_name1@=value1", resp.request.query)) + self.assertTrue(re.search("vf_name2\\$=value2", resp.request.query)) + self.assertTrue(re.search("type=tabloid", resp.request.query)) + + def test_all_fields(self) -> None: + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views/456/data" + opts = TSC.RequestOptions() + opts._all_fields = True + + resp = self.server.users.get_request(url, request_object=opts) + self.assertTrue(re.search("fields=_all_", resp.request.query)) + + def test_multiple_filter_options_shorthand(self) -> None: + with open(FILTER_MULTIPLE, "rb") as f: + response_xml = f.read().decode("utf-8") + # To ensure that this is deterministic, run this a few times + with requests_mock.mock() as m: + # Sometimes pep8 requires you to do things you might not otherwise do + url = "".join( + ( + self.baseurl, + 
"/workbooks?pageNumber=1&pageSize=100&", + "filter=name:eq:foo,tags:in:[sample,safari,weather]", + ) + ) + m.get(url, text=response_xml) + + for _ in range(5): + matching_workbooks = self.server.workbooks.filter(tags__in=["sample", "safari", "weather"], name="foo") + self.assertEqual(3, matching_workbooks.total_available) + + def test_slicing_queryset(self) -> None: + with open(SLICING_QUERYSET, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/views?pageNumber=1", text=response_xml) + all_views = self.server.views.all() + + self.assertEqual(10, len(all_views[::])) + self.assertEqual(5, len(all_views[::2])) + self.assertEqual(8, len(all_views[2:])) + self.assertEqual(2, len(all_views[:2])) + self.assertEqual(3, len(all_views[2:5])) + self.assertEqual(3, len(all_views[-3:])) + self.assertEqual(3, len(all_views[-6:-3])) + self.assertEqual(3, len(all_views[3:6:-1])) + self.assertEqual(3, len(all_views[6:3:-1])) + self.assertEqual(10, len(all_views[::-1])) + self.assertEqual(all_views[3:6], list(reversed(all_views[3:6:-1]))) + + self.assertEqual(all_views[-3].id, "2df55de2-3a2d-4e34-b515-6d4e70b830e9") + + with self.assertRaises(IndexError): + all_views[100] + + def test_slicing_queryset_multi_page(self) -> None: + with requests_mock.mock() as m: + m.get(self.baseurl + "/views?pageNumber=1", text=SLICING_QUERYSET_PAGE_1.read_text()) + m.get(self.baseurl + "/views?pageNumber=2", text=SLICING_QUERYSET_PAGE_2.read_text()) + sliced_views = self.server.views.all()[9:12] + + self.assertEqual(sliced_views[0].id, "2e6d6c81-da71-4b41-892c-ba80d4e7a6d0") + self.assertEqual(sliced_views[1].id, "47ffcb8e-3f7a-4ecf-8ab3-605da9febe20") + self.assertEqual(sliced_views[2].id, "6757fea8-0aa9-4160-a87c-9be27b1d1c8c") + + def test_queryset_filter_args_error(self) -> None: + with self.assertRaises(RuntimeError): + workbooks = self.server.workbooks.filter("argument") + + def test_filtering_parameters(self) -> None: + self.server.version = "3.6" + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views/456/data" + opts = TSC.PDFRequestOptions() + opts.parameter("name1@", "value1") + opts.parameter("name2$", "value2") + opts.page_type = TSC.PDFRequestOptions.PageType.Tabloid + + resp = self.server.workbooks.get_request(url, request_object=opts) + query_params = parse_qs(resp.request.query) + self.assertIn("name1@", query_params) + self.assertIn("value1", query_params["name1@"]) + self.assertIn("name2$", query_params) + self.assertIn("value2", query_params["name2$"]) + self.assertIn("type", query_params) + self.assertIn("tabloid", query_params["type"]) + + def test_queryset_endpoint_pagesize_all(self) -> None: + for page_size in (1, 10, 100, 1000): + with self.subTest(page_size): + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/views?pageSize={page_size}", text=SLICING_QUERYSET_PAGE_1.read_text()) + queryset = self.server.views.all(page_size=page_size) + assert queryset.request_options.pagesize == page_size + _ = list(queryset) + + def test_queryset_endpoint_pagesize_filter(self) -> None: + for page_size in (1, 10, 100, 1000): + with self.subTest(page_size): + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/views?pageSize={page_size}", text=SLICING_QUERYSET_PAGE_1.read_text()) + queryset = self.server.views.filter(page_size=page_size) + assert queryset.request_options.pagesize == page_size + _ = list(queryset) + + def test_queryset_pagesize_filter(self) -> None: + for page_size in (1, 10, 100, 1000): + 
with self.subTest(page_size): + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/views?pageSize={page_size}", text=SLICING_QUERYSET_PAGE_1.read_text()) + queryset = self.server.views.all().filter(page_size=page_size) + assert queryset.request_options.pagesize == page_size + _ = list(queryset) + + def test_language_export(self) -> None: + with requests_mock.mock() as m: + m.get(requests_mock.ANY) + url = self.baseurl + "/views/456/data" + opts = TSC.PDFRequestOptions() + opts.language = "en-US" + + resp = self.server.users.get_request(url, request_object=opts) + self.assertTrue(re.search("language=en-us", resp.request.query)) diff --git a/test/test_requests.py b/test/test_requests.py index 686a4bbb4..5c0d090ba 100644 --- a/test/test_requests.py +++ b/test/test_requests.py @@ -1,18 +1,20 @@ +import re import unittest import requests import requests_mock import tableauserverclient as TSC +from tableauserverclient.server.endpoint.exceptions import InternalServerError, NonXMLResponseError class RequestTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server('http://test') + self.server = TSC.Server("http://test", False) # Fake sign in - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.workbooks.baseurl @@ -20,28 +22,40 @@ def test_make_get_request(self): with requests_mock.mock() as m: m.get(requests_mock.ANY) url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks" - opts = TSC.RequestOptions(pagesize=13, pagenumber=13) - resp = self.server.workbooks._make_request(requests.get, - url, - content=None, - request_object=opts, - auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM', - content_type='text/xml') - - self.assertEqual(resp.request.query, 'pagenumber=13&pagesize=13') - self.assertEqual(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM') - self.assertEqual(resp.request.headers['content-type'], 'text/xml') + opts = TSC.RequestOptions(pagesize=13, pagenumber=15) + resp = self.server.workbooks.get_request(url, request_object=opts) + + self.assertTrue(re.search("pagesize=13", resp.request.query)) + self.assertTrue(re.search("pagenumber=15", resp.request.query)) def test_make_post_request(self): with requests_mock.mock() as m: m.post(requests_mock.ANY) url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks" - resp = self.server.workbooks._make_request(requests.post, - url, - content=b'1337', - request_object=None, - auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM', - content_type='multipart/mixed') - self.assertEqual(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM') - self.assertEqual(resp.request.headers['content-type'], 'multipart/mixed') - self.assertEqual(resp.request.body, b'1337') + resp = self.server.workbooks._make_request( + requests.post, + url, + content=b"1337", + auth_token="j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM", + content_type="multipart/mixed", + ) + self.assertEqual(resp.request.headers["x-tableau-auth"], "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM") + self.assertEqual(resp.request.headers["content-type"], "multipart/mixed") + self.assertTrue(re.search("Tableau Server Client", resp.request.headers["user-agent"])) + self.assertEqual(resp.request.body, b"1337") + + # Test that 500 server errors are handled properly + def test_internal_server_error(self): + 
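# use a fixed REST API version so the mocked serverInfo URL is deterministic +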
self.server.version = "3.2" + server_response = "500: Internal Server Error" + with requests_mock.mock() as m: + m.register_uri("GET", self.server.server_info.baseurl, status_code=500, text=server_response) + self.assertRaisesRegex(InternalServerError, server_response, self.server.server_info.get) + + # Test that non-xml server errors are handled properly + def test_non_xml_error(self): + self.server.version = "3.2" + server_response = "this is not xml" + with requests_mock.mock() as m: + m.register_uri("GET", self.server.server_info.baseurl, status_code=499, text=server_response) + self.assertRaisesRegex(NonXMLResponseError, server_response, self.server.server_info.get) diff --git a/test/test_schedule.py b/test/test_schedule.py index b5aadcbca..b072522a4 100644 --- a/test/test_schedule.py +++ b/test/test_schedule.py @@ -1,27 +1,39 @@ -from datetime import time -import unittest import os +import unittest +from datetime import time + import requests_mock + import tableauserverclient as TSC from tableauserverclient.datetime_helpers import format_datetime TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") GET_XML = os.path.join(TEST_ASSET_DIR, "schedule_get.xml") +GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "schedule_get_by_id.xml") +GET_HOURLY_ID_XML = os.path.join(TEST_ASSET_DIR, "schedule_get_hourly_id.xml") +GET_DAILY_ID_XML = os.path.join(TEST_ASSET_DIR, "schedule_get_daily_id.xml") +GET_MONTHLY_ID_XML = os.path.join(TEST_ASSET_DIR, "schedule_get_monthly_id.xml") +GET_MONTHLY_ID_2_XML = os.path.join(TEST_ASSET_DIR, "schedule_get_monthly_id_2.xml") GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, "schedule_get_empty.xml") CREATE_HOURLY_XML = os.path.join(TEST_ASSET_DIR, "schedule_create_hourly.xml") CREATE_DAILY_XML = os.path.join(TEST_ASSET_DIR, "schedule_create_daily.xml") CREATE_WEEKLY_XML = os.path.join(TEST_ASSET_DIR, "schedule_create_weekly.xml") CREATE_MONTHLY_XML = os.path.join(TEST_ASSET_DIR, "schedule_create_monthly.xml") UPDATE_XML = os.path.join(TEST_ASSET_DIR, "schedule_update.xml") +ADD_WORKBOOK_TO_SCHEDULE = os.path.join(TEST_ASSET_DIR, "schedule_add_workbook.xml") +ADD_WORKBOOK_TO_SCHEDULE_WITH_WARNINGS = os.path.join(TEST_ASSET_DIR, "schedule_add_workbook_with_warnings.xml") +ADD_DATASOURCE_TO_SCHEDULE = os.path.join(TEST_ASSET_DIR, "schedule_add_datasource.xml") +ADD_FLOW_TO_SCHEDULE = os.path.join(TEST_ASSET_DIR, "schedule_add_flow.xml") -WORKBOOK_GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id.xml') -DATASOURCE_GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'datasource_get_by_id.xml') +WORKBOOK_GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "workbook_get_by_id.xml") +DATASOURCE_GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "datasource_get_by_id.xml") +FLOW_GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "flow_get_by_id.xml") class ScheduleTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server("http://test") + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) # Fake Signin self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" @@ -29,7 +41,7 @@ def setUp(self): self.baseurl = self.server.schedules.baseurl - def test_get(self): + def test_get(self) -> None: with open(GET_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: @@ -39,6 +51,7 @@ def test_get(self): extract = all_schedules[0] subscription = all_schedules[1] flow = all_schedules[2] + system = all_schedules[3] self.assertEqual(2, pagination_item.total_available) 
self.assertEqual("c9cff7f9-309c-4361-99ff-d4ba8c9f5467", extract.id) @@ -68,7 +81,16 @@ def test_get(self): self.assertEqual("Flow", flow.schedule_type) self.assertEqual("2019-03-01T09:00:00Z", format_datetime(flow.next_run_at)) - def test_get_empty(self): + self.assertEqual("3cfa4713-ce7c-4fa7-aa2e-f752bfc8dd04", system.id) + self.assertEqual("First of the month 2:00AM", system.name) + self.assertEqual("Active", system.state) + self.assertEqual(30, system.priority) + self.assertEqual("2019-02-19T18:52:19Z", format_datetime(system.created_at)) + self.assertEqual("2019-02-19T18:55:51Z", format_datetime(system.updated_at)) + self.assertEqual("System", system.schedule_type) + self.assertEqual("2019-03-01T09:00:00Z", format_datetime(system.next_run_at)) + + def test_get_empty(self) -> None: with open(GET_EMPTY_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: @@ -78,21 +100,98 @@ def test_get_empty(self): self.assertEqual(0, pagination_item.total_available) self.assertEqual([], all_schedules) - def test_delete(self): + def test_get_by_id(self) -> None: + self.server.version = "3.8" + with open(GET_BY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + schedule_id = "c9cff7f9-309c-4361-99ff-d4ba8c9f5467" + baseurl = f"{self.server.baseurl}/schedules/{schedule_id}" + m.get(baseurl, text=response_xml) + schedule = self.server.schedules.get_by_id(schedule_id) + self.assertIsNotNone(schedule) + self.assertEqual(schedule_id, schedule.id) + self.assertEqual("Weekday early mornings", schedule.name) + self.assertEqual("Active", schedule.state) + + def test_get_hourly_by_id(self) -> None: + self.server.version = "3.8" + with open(GET_HOURLY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + schedule_id = "c9cff7f9-309c-4361-99ff-d4ba8c9f5467" + baseurl = f"{self.server.baseurl}/schedules/{schedule_id}" + m.get(baseurl, text=response_xml) + schedule = self.server.schedules.get_by_id(schedule_id) + self.assertIsNotNone(schedule) + self.assertEqual(schedule_id, schedule.id) + self.assertEqual("Hourly schedule", schedule.name) + self.assertEqual("Active", schedule.state) + self.assertEqual(("Monday", 0.5), schedule.interval_item.interval) + + def test_get_daily_by_id(self) -> None: + self.server.version = "3.8" + with open(GET_DAILY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + schedule_id = "c9cff7f9-309c-4361-99ff-d4ba8c9f5467" + baseurl = f"{self.server.baseurl}/schedules/{schedule_id}" + m.get(baseurl, text=response_xml) + schedule = self.server.schedules.get_by_id(schedule_id) + self.assertIsNotNone(schedule) + self.assertEqual(schedule_id, schedule.id) + self.assertEqual("Daily schedule", schedule.name) + self.assertEqual("Active", schedule.state) + self.assertEqual(("Monday", 2.0), schedule.interval_item.interval) + + def test_get_monthly_by_id(self) -> None: + self.server.version = "3.8" + with open(GET_MONTHLY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + schedule_id = "c9cff7f9-309c-4361-99ff-d4ba8c9f5467" + baseurl = f"{self.server.baseurl}/schedules/{schedule_id}" + m.get(baseurl, text=response_xml) + schedule = self.server.schedules.get_by_id(schedule_id) + self.assertIsNotNone(schedule) + self.assertEqual(schedule_id, schedule.id) + self.assertEqual("Monthly multiple days", schedule.name) + self.assertEqual("Active", schedule.state) + self.assertEqual(("1", 
"2"), schedule.interval_item.interval) + + def test_get_monthly_by_id_2(self) -> None: + self.server.version = "3.15" + with open(GET_MONTHLY_ID_2_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + schedule_id = "8c5caf33-6223-4724-83c3-ccdc1e730a07" + baseurl = f"{self.server.baseurl}/schedules/{schedule_id}" + m.get(baseurl, text=response_xml) + schedule = self.server.schedules.get_by_id(schedule_id) + self.assertIsNotNone(schedule) + self.assertEqual(schedule_id, schedule.id) + self.assertEqual("Monthly First Monday!", schedule.name) + self.assertEqual("Active", schedule.state) + self.assertEqual(("Monday", "First"), schedule.interval_item.interval) + + def test_delete(self) -> None: with requests_mock.mock() as m: m.delete(self.baseurl + "/c9cff7f9-309c-4361-99ff-d4ba8c9f5467", status_code=204) self.server.schedules.delete("c9cff7f9-309c-4361-99ff-d4ba8c9f5467") - def test_create_hourly(self): + def test_create_hourly(self) -> None: with open(CREATE_HOURLY_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - hourly_interval = TSC.HourlyInterval(start_time=time(2, 30), - end_time=time(23, 0), - interval_value=2) - new_schedule = TSC.ScheduleItem("hourly-schedule-1", 50, TSC.ScheduleItem.Type.Extract, - TSC.ScheduleItem.ExecutionOrder.Parallel, hourly_interval) + hourly_interval = TSC.HourlyInterval(start_time=time(2, 30), end_time=time(23, 0), interval_value=2) + new_schedule = TSC.ScheduleItem( + "hourly-schedule-1", + 50, + TSC.ScheduleItem.Type.Extract, + TSC.ScheduleItem.ExecutionOrder.Parallel, + hourly_interval, + ) new_schedule = self.server.schedules.create(new_schedule) self.assertEqual("5f42be25-8a43-47ba-971a-63f2d4e7029c", new_schedule.id) @@ -105,17 +204,22 @@ def test_create_hourly(self): self.assertEqual("2016-09-16T01:30:00Z", format_datetime(new_schedule.next_run_at)) self.assertEqual(TSC.ScheduleItem.ExecutionOrder.Parallel, new_schedule.execution_order) self.assertEqual(time(2, 30), new_schedule.interval_item.start_time) - self.assertEqual(time(23), new_schedule.interval_item.end_time) - self.assertEqual("8", new_schedule.interval_item.interval) + self.assertEqual(time(23), new_schedule.interval_item.end_time) # type: ignore[union-attr] + self.assertEqual(("8",), new_schedule.interval_item.interval) # type: ignore[union-attr] - def test_create_daily(self): + def test_create_daily(self) -> None: with open(CREATE_DAILY_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) daily_interval = TSC.DailyInterval(time(4, 50)) - new_schedule = TSC.ScheduleItem("daily-schedule-1", 90, TSC.ScheduleItem.Type.Subscription, - TSC.ScheduleItem.ExecutionOrder.Serial, daily_interval) + new_schedule = TSC.ScheduleItem( + "daily-schedule-1", + 90, + TSC.ScheduleItem.Type.Subscription, + TSC.ScheduleItem.ExecutionOrder.Serial, + daily_interval, + ) new_schedule = self.server.schedules.create(new_schedule) self.assertEqual("907cae38-72fd-417c-892a-95540c4664cd", new_schedule.id) @@ -129,16 +233,21 @@ def test_create_daily(self): self.assertEqual(TSC.ScheduleItem.ExecutionOrder.Serial, new_schedule.execution_order) self.assertEqual(time(4, 45), new_schedule.interval_item.start_time) - def test_create_weekly(self): + def test_create_weekly(self) -> None: with open(CREATE_WEEKLY_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, 
text=response_xml) - weekly_interval = TSC.WeeklyInterval(time(9, 15), TSC.IntervalItem.Day.Monday, - TSC.IntervalItem.Day.Wednesday, - TSC.IntervalItem.Day.Friday) - new_schedule = TSC.ScheduleItem("weekly-schedule-1", 80, TSC.ScheduleItem.Type.Extract, - TSC.ScheduleItem.ExecutionOrder.Parallel, weekly_interval) + weekly_interval = TSC.WeeklyInterval( + time(9, 15), TSC.IntervalItem.Day.Monday, TSC.IntervalItem.Day.Wednesday, TSC.IntervalItem.Day.Friday + ) + new_schedule = TSC.ScheduleItem( + "weekly-schedule-1", + 80, + TSC.ScheduleItem.Type.Extract, + TSC.ScheduleItem.ExecutionOrder.Parallel, + weekly_interval, + ) new_schedule = self.server.schedules.create(new_schedule) self.assertEqual("1adff386-6be0-4958-9f81-a35e676932bf", new_schedule.id) @@ -151,17 +260,24 @@ def test_create_weekly(self): self.assertEqual("2016-09-16T16:15:00Z", format_datetime(new_schedule.next_run_at)) self.assertEqual(TSC.ScheduleItem.ExecutionOrder.Parallel, new_schedule.execution_order) self.assertEqual(time(9, 15), new_schedule.interval_item.start_time) - self.assertEqual(("Monday", "Wednesday", "Friday"), - new_schedule.interval_item.interval) + self.assertEqual(("Monday", "Wednesday", "Friday"), new_schedule.interval_item.interval) + self.assertEqual(2, len(new_schedule.warnings)) + self.assertEqual("warning 1", new_schedule.warnings[0]) + self.assertEqual("warning 2", new_schedule.warnings[1]) - def test_create_monthly(self): + def test_create_monthly(self) -> None: with open(CREATE_MONTHLY_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) monthly_interval = TSC.MonthlyInterval(time(7), 12) - new_schedule = TSC.ScheduleItem("monthly-schedule-1", 20, TSC.ScheduleItem.Type.Extract, - TSC.ScheduleItem.ExecutionOrder.Serial, monthly_interval) + new_schedule = TSC.ScheduleItem( + "monthly-schedule-1", + 20, + TSC.ScheduleItem.Type.Extract, + TSC.ScheduleItem.ExecutionOrder.Serial, + monthly_interval, + ) new_schedule = self.server.schedules.create(new_schedule) self.assertEqual("e06a7c75-5576-4f68-882d-8909d0219326", new_schedule.id) @@ -174,18 +290,23 @@ def test_create_monthly(self): self.assertEqual("2016-10-12T14:00:00Z", format_datetime(new_schedule.next_run_at)) self.assertEqual(TSC.ScheduleItem.ExecutionOrder.Serial, new_schedule.execution_order) self.assertEqual(time(7), new_schedule.interval_item.start_time) - self.assertEqual("12", new_schedule.interval_item.interval) + self.assertEqual(("12",), new_schedule.interval_item.interval) # type: ignore[union-attr] - def test_update(self): + def test_update(self) -> None: with open(UPDATE_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/7bea1766-1543-4052-9753-9d224bc069b5', text=response_xml) - new_interval = TSC.WeeklyInterval(time(7), TSC.IntervalItem.Day.Monday, - TSC.IntervalItem.Day.Friday) - single_schedule = TSC.ScheduleItem("weekly-schedule-1", 90, TSC.ScheduleItem.Type.Extract, - TSC.ScheduleItem.ExecutionOrder.Parallel, new_interval) + m.put(self.baseurl + "/7bea1766-1543-4052-9753-9d224bc069b5", text=response_xml) + new_interval = TSC.WeeklyInterval(time(7), TSC.IntervalItem.Day.Monday, TSC.IntervalItem.Day.Friday) + single_schedule = TSC.ScheduleItem( + "weekly-schedule-1", + 90, + TSC.ScheduleItem.Type.Extract, + TSC.ScheduleItem.ExecutionOrder.Parallel, + new_interval, + ) single_schedule._id = "7bea1766-1543-4052-9753-9d224bc069b5" + single_schedule.state = TSC.ScheduleItem.State.Suspended 
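+ # update() sends the modified schedule to the server and returns the item as stored there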
single_schedule = self.server.schedules.update(single_schedule) self.assertEqual("7bea1766-1543-4052-9753-9d224bc069b5", single_schedule.id) @@ -196,33 +317,91 @@ def test_update(self): self.assertEqual("2016-09-16T14:00:00Z", format_datetime(single_schedule.next_run_at)) self.assertEqual(TSC.ScheduleItem.ExecutionOrder.Parallel, single_schedule.execution_order) self.assertEqual(time(7), single_schedule.interval_item.start_time) - self.assertEqual(("Monday", "Friday"), - single_schedule.interval_item.interval) + self.assertEqual(("Monday", "Friday"), single_schedule.interval_item.interval) # type: ignore[union-attr] + self.assertEqual(TSC.ScheduleItem.State.Suspended, single_schedule.state) + + # Tests calling update with a schedule item returned from the server + def test_update_after_get(self) -> None: + with open(GET_XML, "rb") as f: + get_response_xml = f.read().decode("utf-8") + with open(UPDATE_XML, "rb") as f: + update_response_xml = f.read().decode("utf-8") - def test_add_workbook(self): + # Get a schedule + with requests_mock.mock() as m: + m.get(self.baseurl, text=get_response_xml) + all_schedules, pagination_item = self.server.schedules.get() + schedule_item = all_schedules[0] + self.assertEqual(TSC.ScheduleItem.State.Active, schedule_item.state) + self.assertEqual("Weekday early mornings", schedule_item.name) + + # Update the schedule + with requests_mock.mock() as m: + m.put(self.baseurl + "/c9cff7f9-309c-4361-99ff-d4ba8c9f5467", text=update_response_xml) + schedule_item.state = TSC.ScheduleItem.State.Suspended + schedule_item.name = "newName" + schedule_item = self.server.schedules.update(schedule_item) + + self.assertEqual(TSC.ScheduleItem.State.Suspended, schedule_item.state) + self.assertEqual("weekly-schedule-1", schedule_item.name) + + def test_add_workbook(self) -> None: self.server.version = "2.8" - baseurl = "{}/sites/{}/schedules".format(self.server.baseurl, self.server.site_id) + baseurl = f"{self.server.baseurl}/sites/{self.server.site_id}/schedules" with open(WORKBOOK_GET_BY_ID_XML, "rb") as f: workbook_response = f.read().decode("utf-8") + with open(ADD_WORKBOOK_TO_SCHEDULE, "rb") as f: + add_workbook_response = f.read().decode("utf-8") with requests_mock.mock() as m: - # TODO: Replace with real response - m.get(self.server.workbooks.baseurl + '/bar', text=workbook_response) - m.put(baseurl + '/foo/workbooks', text="OK") + m.get(self.server.workbooks.baseurl + "/bar", text=workbook_response) + m.put(baseurl + "/foo/workbooks", text=add_workbook_response) workbook = self.server.workbooks.get_by_id("bar") - result = self.server.schedules.add_to_schedule('foo', workbook=workbook) + result = self.server.schedules.add_to_schedule("foo", workbook=workbook) self.assertEqual(0, len(result), "Added properly") - def test_add_datasource(self): + def test_add_workbook_with_warnings(self) -> None: self.server.version = "2.8" - baseurl = "{}/sites/{}/schedules".format(self.server.baseurl, self.server.site_id) + baseurl = f"{self.server.baseurl}/sites/{self.server.site_id}/schedules" + + with open(WORKBOOK_GET_BY_ID_XML, "rb") as f: + workbook_response = f.read().decode("utf-8") + with open(ADD_WORKBOOK_TO_SCHEDULE_WITH_WARNINGS, "rb") as f: + add_workbook_response = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.server.workbooks.baseurl + "/bar", text=workbook_response) + m.put(baseurl + "/foo/workbooks", text=add_workbook_response) + workbook = self.server.workbooks.get_by_id("bar") + result = self.server.schedules.add_to_schedule("foo", 
workbook=workbook) + self.assertEqual(1, len(result), "Not added properly") + self.assertEqual(2, len(result[0].warnings)) + + def test_add_datasource(self) -> None: + self.server.version = "2.8" + baseurl = f"{self.server.baseurl}/sites/{self.server.site_id}/schedules" with open(DATASOURCE_GET_BY_ID_XML, "rb") as f: datasource_response = f.read().decode("utf-8") + with open(ADD_DATASOURCE_TO_SCHEDULE, "rb") as f: + add_datasource_response = f.read().decode("utf-8") with requests_mock.mock() as m: - # TODO: Replace with real response - m.get(self.server.datasources.baseurl + '/bar', text=datasource_response) - m.put(baseurl + '/foo/datasources', text="OK") + m.get(self.server.datasources.baseurl + "/bar", text=datasource_response) + m.put(baseurl + "/foo/datasources", text=add_datasource_response) datasource = self.server.datasources.get_by_id("bar") - result = self.server.schedules.add_to_schedule('foo', datasource=datasource) + result = self.server.schedules.add_to_schedule("foo", datasource=datasource) + self.assertEqual(0, len(result), "Added properly") + + def test_add_flow(self) -> None: + self.server.version = "3.3" + baseurl = f"{self.server.baseurl}/sites/{self.server.site_id}/schedules" + + with open(FLOW_GET_BY_ID_XML, "rb") as f: + flow_response = f.read().decode("utf-8") + with open(ADD_FLOW_TO_SCHEDULE, "rb") as f: + add_flow_response = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.server.flows.baseurl + "/bar", text=flow_response) + m.put(baseurl + "/foo/flows", text=flow_response) + flow = self.server.flows.get_by_id("bar") + result = self.server.schedules.add_to_schedule("foo", flow=flow) self.assertEqual(0, len(result), "Added properly") diff --git a/test/test_server_info.py b/test/test_server_info.py index 3dadff7c1..fa1472c9a 100644 --- a/test/test_server_info.py +++ b/test/test_server_info.py @@ -1,62 +1,75 @@ -import unittest import os.path +import unittest + import requests_mock + import tableauserverclient as TSC +from tableauserverclient.server.endpoint.exceptions import NonXMLResponseError -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") -SERVER_INFO_GET_XML = os.path.join(TEST_ASSET_DIR, 'server_info_get.xml') -SERVER_INFO_25_XML = os.path.join(TEST_ASSET_DIR, 'server_info_25.xml') -SERVER_INFO_404 = os.path.join(TEST_ASSET_DIR, 'server_info_404.xml') -SERVER_INFO_AUTH_INFO_XML = os.path.join(TEST_ASSET_DIR, 'server_info_auth_info.xml') +SERVER_INFO_GET_XML = os.path.join(TEST_ASSET_DIR, "server_info_get.xml") +SERVER_INFO_25_XML = os.path.join(TEST_ASSET_DIR, "server_info_25.xml") +SERVER_INFO_404 = os.path.join(TEST_ASSET_DIR, "server_info_404.xml") +SERVER_INFO_AUTH_INFO_XML = os.path.join(TEST_ASSET_DIR, "server_info_auth_info.xml") +SERVER_INFO_WRONG_SITE = os.path.join(TEST_ASSET_DIR, "server_info_wrong_site.html") class ServerInfoTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server('http://test') + self.server = TSC.Server("http://test", False) self.baseurl = self.server.server_info.baseurl self.server.version = "2.4" def test_server_info_get(self): - with open(SERVER_INFO_GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + with open(SERVER_INFO_GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.server.server_info.baseurl, text=response_xml) actual = self.server.server_info.get() - self.assertEqual('10.1.0', actual.product_version) - 
self.assertEqual('10100.16.1024.2100', actual.build_number) - self.assertEqual('2.4', actual.rest_api_version) + self.assertEqual("10.1.0", actual.product_version) + self.assertEqual("10100.16.1024.2100", actual.build_number) + self.assertEqual("3.10", actual.rest_api_version) def test_server_info_use_highest_version_downgrades(self): - with open(SERVER_INFO_AUTH_INFO_XML, 'rb') as f: + with open(SERVER_INFO_AUTH_INFO_XML, "rb") as f: # This is the auth.xml endpoint present back to 9.0 Servers - auth_response_xml = f.read().decode('utf-8') - with open(SERVER_INFO_404, 'rb') as f: + auth_response_xml = f.read().decode("utf-8") + with open(SERVER_INFO_404, "rb") as f: # 10.1 serverInfo response - si_response_xml = f.read().decode('utf-8') + si_response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: # Return a 404 for serverInfo so we can pretend this is an old Server m.get(self.server.server_address + "/api/2.4/serverInfo", text=si_response_xml, status_code=404) m.get(self.server.server_address + "/auth?format=xml", text=auth_response_xml) self.server.use_server_version() - self.assertEqual(self.server.version, '2.2') + # does server-version[9.2] lookup in PRODUCT_TO_REST_VERSION + self.assertEqual(self.server.version, "2.2") def test_server_info_use_highest_version_upgrades(self): - with open(SERVER_INFO_GET_XML, 'rb') as f: - si_response_xml = f.read().decode('utf-8') + with open(SERVER_INFO_GET_XML, "rb") as f: + si_response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.server.server_address + "/api/2.4/serverInfo", text=si_response_xml) + m.get(self.server.server_address + "/api/2.8/serverInfo", text=si_response_xml) # Pretend we're old - self.server.version = '2.0' + self.server.version = "2.8" self.server.use_server_version() - # Did we upgrade to 2.4? - self.assertEqual(self.server.version, '2.4') + # Did we upgrade to 3.10? 
+ self.assertEqual(self.server.version, "3.10") def test_server_use_server_version_flag(self): - with open(SERVER_INFO_25_XML, 'rb') as f: - si_response_xml = f.read().decode('utf-8') + with open(SERVER_INFO_25_XML, "rb") as f: + si_response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get("http://test/api/2.4/serverInfo", text=si_response_xml) + server = TSC.Server("http://test", use_server_version=True) + self.assertEqual(server.version, "2.5") + + def test_server_wrong_site(self): + with open(SERVER_INFO_WRONG_SITE, "rb") as f: + response = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get('http://test/api/2.4/serverInfo', text=si_response_xml) - server = TSC.Server('http://test', use_server_version=True) - self.assertEqual(server.version, '2.5') + m.get(self.server.server_info.baseurl, text=response, status_code=404) + with self.assertRaises(NonXMLResponseError): + self.server.server_info.get() diff --git a/test/test_site.py b/test/test_site.py index 9603e73c2..96b75f9ff 100644 --- a/test/test_site.py +++ b/test/test_site.py @@ -1,140 +1,262 @@ -import unittest import os.path +import unittest + +import pytest import requests_mock + import tableauserverclient as TSC -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") -GET_XML = os.path.join(TEST_ASSET_DIR, 'site_get.xml') -GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'site_get_by_id.xml') -GET_BY_NAME_XML = os.path.join(TEST_ASSET_DIR, 'site_get_by_name.xml') -UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'site_update.xml') -CREATE_XML = os.path.join(TEST_ASSET_DIR, 'site_create.xml') +GET_XML = os.path.join(TEST_ASSET_DIR, "site_get.xml") +GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "site_get_by_id.xml") +GET_BY_NAME_XML = os.path.join(TEST_ASSET_DIR, "site_get_by_name.xml") +UPDATE_XML = os.path.join(TEST_ASSET_DIR, "site_update.xml") +CREATE_XML = os.path.join(TEST_ASSET_DIR, "site_create.xml") class SiteTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + self.server.version = "3.10" # Fake signin - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' - self.server._site_id = '0626857c-1def-4503-a7d8-7907c3ff9d9f' + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server._site_id = "0626857c-1def-4503-a7d8-7907c3ff9d9f" self.baseurl = self.server.sites.baseurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + # sites APIs can only be called on the site being logged in to + self.logged_in_site = self.server.site_id + + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_sites, pagination_item = self.server.sites.get() self.assertEqual(2, pagination_item.total_available) - self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', all_sites[0].id) - self.assertEqual('Active', all_sites[0].state) - self.assertEqual('Default', all_sites[0].name) - self.assertEqual('ContentOnly', all_sites[0].admin_mode) + self.assertEqual("dad65087-b08b-4603-af4e-2887b8aafc67", all_sites[0].id) + self.assertEqual("Active", all_sites[0].state) + self.assertEqual("Default", all_sites[0].name) + self.assertEqual("ContentOnly", all_sites[0].admin_mode) self.assertEqual(False, all_sites[0].revision_history_enabled) 
self.assertEqual(True, all_sites[0].subscribe_others_enabled) - - self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', all_sites[1].id) - self.assertEqual('Active', all_sites[1].state) - self.assertEqual('Samples', all_sites[1].name) - self.assertEqual('ContentOnly', all_sites[1].admin_mode) + self.assertEqual(25, all_sites[0].revision_limit) + self.assertEqual(None, all_sites[0].num_users) + self.assertEqual(None, all_sites[0].storage) + self.assertEqual(True, all_sites[0].cataloging_enabled) + self.assertEqual(False, all_sites[0].editing_flows_enabled) + self.assertEqual(False, all_sites[0].scheduling_flows_enabled) + self.assertEqual(True, all_sites[0].allow_subscription_attachments) + self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", all_sites[1].id) + self.assertEqual("Active", all_sites[1].state) + self.assertEqual("Samples", all_sites[1].name) + self.assertEqual("ContentOnly", all_sites[1].admin_mode) self.assertEqual(False, all_sites[1].revision_history_enabled) self.assertEqual(True, all_sites[1].subscribe_others_enabled) + self.assertEqual(False, all_sites[1].guest_access_enabled) + self.assertEqual(True, all_sites[1].cache_warmup_enabled) + self.assertEqual(True, all_sites[1].commenting_enabled) + self.assertEqual(True, all_sites[1].cache_warmup_enabled) + self.assertEqual(False, all_sites[1].request_access_enabled) + self.assertEqual(True, all_sites[1].run_now_enabled) + self.assertEqual(1, all_sites[1].tier_explorer_capacity) + self.assertEqual(2, all_sites[1].tier_creator_capacity) + self.assertEqual(1, all_sites[1].tier_viewer_capacity) + self.assertEqual(False, all_sites[1].flows_enabled) + self.assertEqual(None, all_sites[1].data_acceleration_mode) - def test_get_before_signin(self): + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.sites.get) - def test_get_by_id(self): - with open(GET_BY_ID_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get_by_id(self) -> None: + with open(GET_BY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/dad65087-b08b-4603-af4e-2887b8aafc67', text=response_xml) - single_site = self.server.sites.get_by_id('dad65087-b08b-4603-af4e-2887b8aafc67') + m.get(self.baseurl + "/" + self.logged_in_site, text=response_xml) + single_site = self.server.sites.get_by_id(self.logged_in_site) - self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', single_site.id) - self.assertEqual('Active', single_site.state) - self.assertEqual('Default', single_site.name) - self.assertEqual('ContentOnly', single_site.admin_mode) + self.assertEqual(self.logged_in_site, single_site.id) + self.assertEqual("Active", single_site.state) + self.assertEqual("Default", single_site.name) + self.assertEqual("ContentOnly", single_site.admin_mode) self.assertEqual(False, single_site.revision_history_enabled) self.assertEqual(True, single_site.subscribe_others_enabled) self.assertEqual(False, single_site.disable_subscriptions) + self.assertEqual(False, single_site.data_alerts_enabled) + self.assertEqual(False, single_site.commenting_mentions_enabled) + self.assertEqual(True, single_site.catalog_obfuscation_enabled) - def test_get_by_id_missing_id(self): - self.assertRaises(ValueError, self.server.sites.get_by_id, '') + def test_get_by_id_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.sites.get_by_id, "") - def test_get_by_name(self): - with open(GET_BY_NAME_XML, 'rb') as f: - response_xml = 
f.read().decode('utf-8') + def test_get_by_name(self) -> None: + with open(GET_BY_NAME_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/testsite?key=name', text=response_xml) - single_site = self.server.sites.get_by_name('testsite') + m.get(self.baseurl + "/testsite?key=name", text=response_xml) + single_site = self.server.sites.get_by_name("testsite") - self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', single_site.id) - self.assertEqual('Active', single_site.state) - self.assertEqual('testsite', single_site.name) - self.assertEqual('ContentOnly', single_site.admin_mode) + self.assertEqual(self.logged_in_site, single_site.id) + self.assertEqual("Active", single_site.state) + self.assertEqual("testsite", single_site.name) + self.assertEqual("ContentOnly", single_site.admin_mode) self.assertEqual(False, single_site.revision_history_enabled) self.assertEqual(True, single_site.subscribe_others_enabled) self.assertEqual(False, single_site.disable_subscriptions) - def test_get_by_name_missing_name(self): - self.assertRaises(ValueError, self.server.sites.get_by_name, '') + def test_get_by_name_missing_name(self) -> None: + self.assertRaises(ValueError, self.server.sites.get_by_name, "") - def test_update(self): - with open(UPDATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + @pytest.mark.filterwarnings("ignore:Tiered license level is set") + @pytest.mark.filterwarnings("ignore:FlowsEnabled has been removed") + def test_update(self) -> None: + with open(UPDATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/6b7179ba-b82b-4f0f-91ed-812074ac5da6', text=response_xml) - single_site = TSC.SiteItem(name='Tableau', content_url='tableau', - admin_mode=TSC.SiteItem.AdminMode.ContentAndUsers, - user_quota=15, storage_quota=1000, - disable_subscriptions=True, revision_history_enabled=False, - materialized_views_mode='disable') - single_site._id = '6b7179ba-b82b-4f0f-91ed-812074ac5da6' + m.put(self.baseurl + "/" + self.logged_in_site, text=response_xml) + single_site = TSC.SiteItem( + name="Tableau", + content_url="tableau", + admin_mode=TSC.SiteItem.AdminMode.ContentAndUsers, + user_quota=15, + storage_quota=1000, + disable_subscriptions=True, + revision_history_enabled=False, + data_acceleration_mode="disable", + flow_auto_save_enabled=True, + web_extraction_enabled=False, + metrics_content_type_enabled=True, + notify_site_admins_on_throttle=False, + authoring_enabled=True, + custom_subscription_email_enabled=True, + custom_subscription_email="test@test.com", + custom_subscription_footer_enabled=True, + custom_subscription_footer="example_footer", + ask_data_mode="EnabledByDefault", + named_sharing_enabled=False, + mobile_biometrics_enabled=True, + sheet_image_enabled=False, + derived_permissions_enabled=True, + user_visibility_mode="FULL", + use_default_time_zone=False, + time_zone="America/Los_Angeles", + auto_suspend_refresh_enabled=True, + auto_suspend_refresh_inactivity_window=55, + tier_creator_capacity=5, + tier_explorer_capacity=5, + tier_viewer_capacity=5, + ) + single_site._id = self.logged_in_site + self.server.sites.parent_srv = self.server single_site = self.server.sites.update(single_site) - self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', single_site.id) - self.assertEqual('tableau', single_site.content_url) - self.assertEqual('Suspended', single_site.state) - self.assertEqual('Tableau', single_site.name) - 
self.assertEqual('ContentAndUsers', single_site.admin_mode) + self.assertEqual(self.logged_in_site, single_site.id) + self.assertEqual("tableau", single_site.content_url) + self.assertEqual("Suspended", single_site.state) + self.assertEqual("Tableau", single_site.name) + self.assertEqual("ContentAndUsers", single_site.admin_mode) self.assertEqual(True, single_site.revision_history_enabled) self.assertEqual(13, single_site.revision_limit) self.assertEqual(True, single_site.disable_subscriptions) - self.assertEqual(15, single_site.user_quota) - self.assertEqual('disable', single_site.materialized_views_mode) + self.assertEqual(None, single_site.user_quota) + self.assertEqual(5, single_site.tier_creator_capacity) + self.assertEqual(5, single_site.tier_explorer_capacity) + self.assertEqual(5, single_site.tier_viewer_capacity) + self.assertEqual("disable", single_site.data_acceleration_mode) + self.assertEqual(True, single_site.flows_enabled) + self.assertEqual(True, single_site.cataloging_enabled) + self.assertEqual(True, single_site.flow_auto_save_enabled) + self.assertEqual(False, single_site.web_extraction_enabled) + self.assertEqual(True, single_site.metrics_content_type_enabled) + self.assertEqual(False, single_site.notify_site_admins_on_throttle) + self.assertEqual(True, single_site.authoring_enabled) + self.assertEqual(True, single_site.custom_subscription_email_enabled) + self.assertEqual("test@test.com", single_site.custom_subscription_email) + self.assertEqual(True, single_site.custom_subscription_footer_enabled) + self.assertEqual("example_footer", single_site.custom_subscription_footer) + self.assertEqual("EnabledByDefault", single_site.ask_data_mode) + self.assertEqual(False, single_site.named_sharing_enabled) + self.assertEqual(True, single_site.mobile_biometrics_enabled) + self.assertEqual(False, single_site.sheet_image_enabled) + self.assertEqual(True, single_site.derived_permissions_enabled) + self.assertEqual("FULL", single_site.user_visibility_mode) + self.assertEqual(False, single_site.use_default_time_zone) + self.assertEqual("America/Los_Angeles", single_site.time_zone) + self.assertEqual(True, single_site.auto_suspend_refresh_enabled) + self.assertEqual(55, single_site.auto_suspend_refresh_inactivity_window) - def test_update_missing_id(self): - single_site = TSC.SiteItem('test', 'test') + def test_update_missing_id(self) -> None: + single_site = TSC.SiteItem("test", "test") self.assertRaises(TSC.MissingRequiredFieldError, self.server.sites.update, single_site) - def test_create(self): - with open(CREATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_null_site_quota(self) -> None: + test_site = TSC.SiteItem("testname", "testcontenturl", tier_explorer_capacity=1, user_quota=None) + assert test_site.tier_explorer_capacity == 1 + with self.assertRaises(ValueError): + test_site.user_quota = 1 + test_site.tier_explorer_capacity = None + test_site.user_quota = 1 + + def test_replace_license_tiers_with_user_quota(self) -> None: + test_site = TSC.SiteItem("testname", "testcontenturl", tier_explorer_capacity=1, user_quota=None) + assert test_site.tier_explorer_capacity == 1 + with self.assertRaises(ValueError): + test_site.user_quota = 1 + test_site.replace_license_tiers_with_user_quota(1) + self.assertEqual(1, test_site.user_quota) + self.assertIsNone(test_site.tier_explorer_capacity) + + @pytest.mark.filterwarnings("ignore:FlowsEnabled has been removed") + def test_create(self) -> None: + with open(CREATE_XML, "rb") as f: + response_xml = 
f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - new_site = TSC.SiteItem(name='Tableau', content_url='tableau', - admin_mode=TSC.SiteItem.AdminMode.ContentAndUsers, user_quota=15, - storage_quota=1000, disable_subscriptions=True) + new_site = TSC.SiteItem( + name="Tableau", + content_url="tableau", + admin_mode=TSC.SiteItem.AdminMode.ContentAndUsers, + user_quota=15, + storage_quota=1000, + disable_subscriptions=True, + ) new_site = self.server.sites.create(new_site) - self.assertEqual('0626857c-1def-4503-a7d8-7907c3ff9d9f', new_site.id) - self.assertEqual('tableau', new_site.content_url) - self.assertEqual('Tableau', new_site.name) - self.assertEqual('Active', new_site.state) - self.assertEqual('ContentAndUsers', new_site.admin_mode) + new_site._tier_viewer_capacity = None + new_site._tier_creator_capacity = None + new_site._tier_explorer_capacity = None + self.assertEqual("0626857c-1def-4503-a7d8-7907c3ff9d9f", new_site.id) + self.assertEqual("tableau", new_site.content_url) + self.assertEqual("Tableau", new_site.name) + self.assertEqual("Active", new_site.state) + self.assertEqual("ContentAndUsers", new_site.admin_mode) self.assertEqual(False, new_site.revision_history_enabled) self.assertEqual(True, new_site.subscribe_others_enabled) self.assertEqual(True, new_site.disable_subscriptions) self.assertEqual(15, new_site.user_quota) - def test_delete(self): + def test_delete(self) -> None: + with requests_mock.mock() as m: + m.delete(self.baseurl + "/0626857c-1def-4503-a7d8-7907c3ff9d9f", status_code=204) + self.server.sites.delete("0626857c-1def-4503-a7d8-7907c3ff9d9f") + + def test_delete_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.sites.delete, "") + + def test_encrypt(self) -> None: with requests_mock.mock() as m: - m.delete(self.baseurl + '/0626857c-1def-4503-a7d8-7907c3ff9d9f', status_code=204) - self.server.sites.delete('0626857c-1def-4503-a7d8-7907c3ff9d9f') + m.post(self.baseurl + "/0626857c-1def-4503-a7d8-7907c3ff9d9f/encrypt-extracts", status_code=200) + self.server.sites.encrypt_extracts("0626857c-1def-4503-a7d8-7907c3ff9d9f") - def test_delete_missing_id(self): - self.assertRaises(ValueError, self.server.sites.delete, '') + def test_recrypt(self) -> None: + with requests_mock.mock() as m: + m.post(self.baseurl + "/0626857c-1def-4503-a7d8-7907c3ff9d9f/reencrypt-extracts", status_code=200) + self.server.sites.re_encrypt_extracts("0626857c-1def-4503-a7d8-7907c3ff9d9f") + + def test_decrypt(self) -> None: + with requests_mock.mock() as m: + m.post(self.baseurl + "/0626857c-1def-4503-a7d8-7907c3ff9d9f/decrypt-extracts", status_code=200) + self.server.sites.decrypt_extracts("0626857c-1def-4503-a7d8-7907c3ff9d9f") diff --git a/test/test_site_model.py b/test/test_site_model.py index 99fa73ce9..60ad9c5e5 100644 --- a/test/test_site_model.py +++ b/test/test_site_model.py @@ -1,6 +1,5 @@ -# coding=utf-8 - import unittest + import tableauserverclient as TSC @@ -21,7 +20,6 @@ def test_invalid_admin_mode(self): site.admin_mode = "Hello" def test_invalid_content_url(self): - with self.assertRaises(ValueError): site = TSC.SiteItem(name="蚵仔煎", content_url="蚵仔煎") diff --git a/test/test_sort.py b/test/test_sort.py index 88c0da728..8eebef6f4 100644 --- a/test/test_sort.py +++ b/test/test_sort.py @@ -1,15 +1,17 @@ +import re import unittest -import os -import requests + import requests_mock + import tableauserverclient as TSC class SortTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server('http://test') - 
self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server = TSC.Server("http://test", False) + self.server.version = "3.10" + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.workbooks.baseurl def test_empty_filter(self): @@ -20,24 +22,17 @@ def test_filter_equals(self): m.get(requests_mock.ANY) url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks" opts = TSC.RequestOptions(pagesize=13, pagenumber=13) - opts.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, - TSC.RequestOptions.Operator.Equals, - 'Superstore')) + opts.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.Equals, "Superstore")) - resp = self.server.workbooks._make_request(requests.get, - url, - content=None, - request_object=opts, - auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM', - content_type='text/xml') + resp = self.server.workbooks.get_request(url, request_object=opts) - self.assertEqual(resp.request.query, 'pagenumber=13&pagesize=13&filter=name:eq:superstore') + self.assertTrue(re.search("pagenumber=13", resp.request.query)) + self.assertTrue(re.search("pagesize=13", resp.request.query)) + self.assertTrue(re.search("filter=name%3aeq%3asuperstore", resp.request.query)) def test_filter_equals_list(self): with self.assertRaises(ValueError) as cm: - TSC.Filter(TSC.RequestOptions.Field.Tags, - TSC.RequestOptions.Operator.Equals, - ['foo', 'bar']) + TSC.Filter(TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.Equals, ["foo", "bar"]) self.assertEqual("Filter values can only be a list if the operator is 'in'.", str(cm.exception)), @@ -47,35 +42,27 @@ def test_filter_in(self): url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks" opts = TSC.RequestOptions(pagesize=13, pagenumber=13) - opts.filter.add(TSC.Filter(TSC.RequestOptions.Field.Tags, - TSC.RequestOptions.Operator.In, - ['stocks', 'market'])) - - resp = self.server.workbooks._make_request(requests.get, - url, - content=None, - request_object=opts, - auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM', - content_type='text/xml') + opts.filter.add( + TSC.Filter(TSC.RequestOptions.Field.Tags, TSC.RequestOptions.Operator.In, ["stocks", "market"]) + ) - self.assertEqual(resp.request.query, 'pagenumber=13&pagesize=13&filter=tags:in:[stocks,market]') + resp = self.server.workbooks.get_request(url, request_object=opts) + self.assertTrue(re.search("pagenumber=13", resp.request.query)) + self.assertTrue(re.search("pagesize=13", resp.request.query)) + self.assertTrue(re.search("filter=tags%3ain%3a%5bstocks%2cmarket%5d", resp.request.query)) def test_sort_asc(self): with requests_mock.mock() as m: m.get(requests_mock.ANY) url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks" opts = TSC.RequestOptions(pagesize=13, pagenumber=13) - opts.sort.add(TSC.Sort(TSC.RequestOptions.Field.Name, - TSC.RequestOptions.Direction.Asc)) + opts.sort.add(TSC.Sort(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Direction.Asc)) - resp = self.server.workbooks._make_request(requests.get, - url, - content=None, - request_object=opts, - auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM', - content_type='text/xml') + resp = self.server.workbooks.get_request(url, request_object=opts) - self.assertEqual(resp.request.query, 'pagenumber=13&pagesize=13&sort=name:asc') + self.assertTrue(re.search("pagenumber=13", 
resp.request.query)) + self.assertTrue(re.search("pagesize=13", resp.request.query)) + self.assertTrue(re.search("sort=name%3aasc", resp.request.query)) def test_filter_combo(self): with requests_mock.mock() as m: @@ -83,25 +70,34 @@ def test_filter_combo(self): url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/users" opts = TSC.RequestOptions(pagesize=13, pagenumber=13) - opts.filter.add(TSC.Filter(TSC.RequestOptions.Field.LastLogin, - TSC.RequestOptions.Operator.GreaterThanOrEqual, - '2017-01-15T00:00:00:00Z')) + opts.filter.add( + TSC.Filter( + TSC.RequestOptions.Field.LastLogin, + TSC.RequestOptions.Operator.GreaterThanOrEqual, + "2017-01-15T00:00:00:00Z", + ) + ) - opts.filter.add(TSC.Filter(TSC.RequestOptions.Field.SiteRole, - TSC.RequestOptions.Operator.Equals, - 'Publisher')) + opts.filter.add( + TSC.Filter(TSC.RequestOptions.Field.SiteRole, TSC.RequestOptions.Operator.Equals, "Publisher") + ) - resp = self.server.workbooks._make_request(requests.get, - url, - content=None, - request_object=opts, - auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM', - content_type='text/xml') + resp = self.server.workbooks.get_request(url, request_object=opts) - expected = 'pagenumber=13&pagesize=13&filter=lastlogin:gte:2017-01-15t00:00:00:00z,siterole:eq:publisher' + expected = ( + "pagenumber=13&pagesize=13&filter=lastlogin%3agte%3a" + "2017-01-15t00%3a00%3a00%3a00z%2csiterole%3aeq%3apublisher" + ) - self.assertEqual(resp.request.query, expected) + self.assertTrue(re.search("pagenumber=13", resp.request.query)) + self.assertTrue(re.search("pagesize=13", resp.request.query)) + self.assertTrue( + re.search( + "filter=lastlogin%3agte%3a2017-01-15t00%3a00%3a00%3a00z%2csiterole%3aeq%3apublisher", + resp.request.query, + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_subscription.py b/test/test_subscription.py index 2e4b1eadf..45dcb0a1c 100644 --- a/test/test_subscription.py +++ b/test/test_subscription.py @@ -1,6 +1,8 @@ -import unittest import os +import unittest + import requests_mock + import tableauserverclient as TSC TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") @@ -11,9 +13,9 @@ class SubscriptionTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server("http://test") - self.server.version = '2.6' + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + self.server.version = "2.6" # Fake Signin self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" @@ -21,44 +23,68 @@ def setUp(self): self.baseurl = self.server.subscriptions.baseurl - def test_get_subscriptions(self): + def test_get_subscriptions(self) -> None: with open(GET_XML, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_subscriptions, pagination_item = self.server.subscriptions.get() + self.assertEqual(2, pagination_item.total_available) subscription = all_subscriptions[0] - self.assertEqual('382e9a6e-0c08-4a95-b6c1-c14df7bac3e4', subscription.id) - self.assertEqual('View', subscription.target.type) - self.assertEqual('cdd716ca-5818-470e-8bec-086885dbadee', subscription.target.id) - self.assertEqual('c0d5fc44-ad8c-4957-bec0-b70ed0f8df1e', subscription.user_id) - self.assertEqual('Not Found Alert', subscription.subject) - self.assertEqual('7617c389-cdca-4940-a66e-69956fcebf3e', subscription.schedule_id) - - def test_get_subscription_by_id(self): + self.assertEqual("382e9a6e-0c08-4a95-b6c1-c14df7bac3e4", subscription.id) + 
self.assertEqual("NOT FOUND!", subscription.message) + self.assertTrue(subscription.attach_image) + self.assertFalse(subscription.attach_pdf) + self.assertFalse(subscription.suspended) + self.assertFalse(subscription.send_if_view_empty) + self.assertIsNone(subscription.page_orientation) + self.assertIsNone(subscription.page_size_option) + self.assertEqual("Not Found Alert", subscription.subject) + self.assertEqual("cdd716ca-5818-470e-8bec-086885dbadee", subscription.target.id) + self.assertEqual("View", subscription.target.type) + self.assertEqual("c0d5fc44-ad8c-4957-bec0-b70ed0f8df1e", subscription.user_id) + self.assertEqual("7617c389-cdca-4940-a66e-69956fcebf3e", subscription.schedule_id) + + subscription = all_subscriptions[1] + self.assertEqual("23cb7630-afc8-4c8e-b6cd-83ae0322ec66", subscription.id) + self.assertEqual("overview", subscription.message) + self.assertFalse(subscription.attach_image) + self.assertTrue(subscription.attach_pdf) + self.assertTrue(subscription.suspended) + self.assertTrue(subscription.send_if_view_empty) + self.assertEqual("PORTRAIT", subscription.page_orientation) + self.assertEqual("A5", subscription.page_size_option) + self.assertEqual("Last 7 Days", subscription.subject) + self.assertEqual("2e6b4e8f-22dd-4061-8f75-bf33703da7e5", subscription.target.id) + self.assertEqual("Workbook", subscription.target.type) + self.assertEqual("c0d5fc44-ad8c-4957-bec0-b70ed0f8df1e", subscription.user_id) + self.assertEqual("3407cd38-7b39-4983-86a6-67a1506a5e3f", subscription.schedule_id) + + def test_get_subscription_by_id(self) -> None: with open(GET_XML_BY_ID, "rb") as f: response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/382e9a6e-0c08-4a95-b6c1-c14df7bac3e4', text=response_xml) - subscription = self.server.subscriptions.get_by_id('382e9a6e-0c08-4a95-b6c1-c14df7bac3e4') - - self.assertEqual('382e9a6e-0c08-4a95-b6c1-c14df7bac3e4', subscription.id) - self.assertEqual('View', subscription.target.type) - self.assertEqual('cdd716ca-5818-470e-8bec-086885dbadee', subscription.target.id) - self.assertEqual('c0d5fc44-ad8c-4957-bec0-b70ed0f8df1e', subscription.user_id) - self.assertEqual('Not Found Alert', subscription.subject) - self.assertEqual('7617c389-cdca-4940-a66e-69956fcebf3e', subscription.schedule_id) - - def test_create_subscription(self): - with open(CREATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + m.get(self.baseurl + "/382e9a6e-0c08-4a95-b6c1-c14df7bac3e4", text=response_xml) + subscription = self.server.subscriptions.get_by_id("382e9a6e-0c08-4a95-b6c1-c14df7bac3e4") + + self.assertEqual("382e9a6e-0c08-4a95-b6c1-c14df7bac3e4", subscription.id) + self.assertEqual("View", subscription.target.type) + self.assertEqual("cdd716ca-5818-470e-8bec-086885dbadee", subscription.target.id) + self.assertEqual("c0d5fc44-ad8c-4957-bec0-b70ed0f8df1e", subscription.user_id) + self.assertEqual("Not Found Alert", subscription.subject) + self.assertEqual("7617c389-cdca-4940-a66e-69956fcebf3e", subscription.schedule_id) + + def test_create_subscription(self) -> None: + with open(CREATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) target_item = TSC.Target("960e61f2-1838-40b2-bba2-340c9492f943", "workbook") - new_subscription = TSC.SubscriptionItem("subject", "4906c453-d5ec-4972-9ff4-789b629bdfa2", - "8d30c8de-0a5f-4bee-b266-c621b4f3eed0", target_item) + new_subscription = TSC.SubscriptionItem( + "subject", 
"4906c453-d5ec-4972-9ff4-789b629bdfa2", "8d30c8de-0a5f-4bee-b266-c621b4f3eed0", target_item + ) new_subscription = self.server.subscriptions.create(new_subscription) self.assertEqual("78e9318d-2d29-4d67-b60f-3f2f5fd89ecc", new_subscription.id) @@ -68,7 +94,7 @@ def test_create_subscription(self): self.assertEqual("4906c453-d5ec-4972-9ff4-789b629bdfa2", new_subscription.schedule_id) self.assertEqual("8d30c8de-0a5f-4bee-b266-c621b4f3eed0", new_subscription.user_id) - def test_delete_subscription(self): + def test_delete_subscription(self) -> None: with requests_mock.mock() as m: - m.delete(self.baseurl + '/78e9318d-2d29-4d67-b60f-3f2f5fd89ecc', status_code=204) - self.server.subscriptions.delete('78e9318d-2d29-4d67-b60f-3f2f5fd89ecc') + m.delete(self.baseurl + "/78e9318d-2d29-4d67-b60f-3f2f5fd89ecc", status_code=204) + self.server.subscriptions.delete("78e9318d-2d29-4d67-b60f-3f2f5fd89ecc") diff --git a/test/test_table.py b/test/test_table.py new file mode 100644 index 000000000..8c6c71f76 --- /dev/null +++ b/test/test_table.py @@ -0,0 +1,59 @@ +import unittest + +import requests_mock + +import tableauserverclient as TSC +from ._utils import read_xml_asset + +GET_XML = "table_get.xml" +UPDATE_XML = "table_update.xml" + + +class TableTests(unittest.TestCase): + def setUp(self): + self.server = TSC.Server("http://test", False) + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.5" + + self.baseurl = self.server.tables.baseurl + + def test_get(self): + response_xml = read_xml_asset(GET_XML) + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_tables, pagination_item = self.server.tables.get() + + self.assertEqual(4, pagination_item.total_available) + self.assertEqual("10224773-ecee-42ac-b822-d786b0b8e4d9", all_tables[0].id) + self.assertEqual("dim_Product", all_tables[0].name) + + self.assertEqual("53c77bc1-fb41-4342-a75a-f68ac0656d0d", all_tables[1].id) + self.assertEqual("customer", all_tables[1].name) + self.assertEqual("dbo", all_tables[1].schema) + self.assertEqual("9324cf6b-ba72-4b8e-b895-ac3f28d2f0e0", all_tables[1].contact_id) + self.assertEqual(False, all_tables[1].certified) + + def test_update(self): + response_xml = read_xml_asset(UPDATE_XML) + with requests_mock.mock() as m: + m.put(self.baseurl + "/10224773-ecee-42ac-b822-d786b0b8e4d9", text=response_xml) + single_table = TSC.TableItem("test") + single_table._id = "10224773-ecee-42ac-b822-d786b0b8e4d9" + + single_table.contact_id = "8e1a8235-c9ee-4d61-ae82-2ffacceed8e0" + single_table.certified = True + single_table.certification_note = "Test" + single_table = self.server.tables.update(single_table) + + self.assertEqual("10224773-ecee-42ac-b822-d786b0b8e4d9", single_table.id) + self.assertEqual("8e1a8235-c9ee-4d61-ae82-2ffacceed8e0", single_table.contact_id) + self.assertEqual(True, single_table.certified) + self.assertEqual("Test", single_table.certification_note) + + def test_delete(self): + with requests_mock.mock() as m: + m.delete(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5", status_code=204) + self.server.tables.delete("0448d2ed-590d-4fa0-b272-a2a8a24555b5") diff --git a/test/test_tableauauth_model.py b/test/test_tableauauth_model.py index 94a44706a..195bcf0a9 100644 --- a/test/test_tableauauth_model.py +++ b/test/test_tableauauth_model.py @@ -1,25 +1,12 @@ import unittest -import warnings + import tableauserverclient as TSC class 
TableauAuthModelTests(unittest.TestCase): def setUp(self): - self.auth = TSC.TableauAuth('user', - 'password', - site_id='site1', - user_id_to_impersonate='admin') + self.auth = TSC.TableauAuth("user", "password", site_id="site1", user_id_to_impersonate="admin") def test_username_password_required(self): with self.assertRaises(TypeError): TSC.TableauAuth() - - def test_site_arg_raises_warning(self): - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - tableau_auth = TSC.TableauAuth('user', - 'password', - site='Default') - - self.assertTrue(any(item.category == DeprecationWarning for item in w)) diff --git a/test/test_tagging.py b/test/test_tagging.py new file mode 100644 index 000000000..23dffebfb --- /dev/null +++ b/test/test_tagging.py @@ -0,0 +1,230 @@ +from contextlib import ExitStack +import re +from collections.abc import Iterable +import uuid +from xml.etree import ElementTree as ET + +import pytest +import requests_mock +import tableauserverclient as TSC + + +@pytest.fixture +def get_server() -> TSC.Server: + server = TSC.Server("http://test", False) + + # Fake sign in + server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + server.version = "3.28" + return server + + +def add_tag_xml_response_factory(tags: Iterable[str]) -> str: + root = ET.Element("tsResponse") + tags_element = ET.SubElement(root, "tags") + for tag in tags: + tag_element = ET.SubElement(tags_element, "tag") + tag_element.attrib["label"] = tag + root.attrib["xmlns"] = "http://tableau.com/api" + return ET.tostring(root, encoding="utf-8").decode("utf-8") + + +def batch_add_tags_xml_response_factory(tags, content): + root = ET.Element("tsResponse") + tag_batch = ET.SubElement(root, "tagBatch") + tags_element = ET.SubElement(tag_batch, "tags") + for tag in tags: + tag_element = ET.SubElement(tags_element, "tag") + tag_element.attrib["label"] = tag + contents_element = ET.SubElement(tag_batch, "contents") + for item in content: + content_elem = ET.SubElement(contents_element, "content") + content_elem.attrib["id"] = item.id or "some_id" + t = item.__class__.__name__.replace("Item", "") or "" + content_elem.attrib["contentType"] = t + root.attrib["xmlns"] = "http://tableau.com/api" + return ET.tostring(root, encoding="utf-8").decode("utf-8") + + +def make_workbook() -> TSC.WorkbookItem: + workbook = TSC.WorkbookItem("project", "test") + workbook._id = str(uuid.uuid4()) + return workbook + + +def make_view() -> TSC.ViewItem: + view = TSC.ViewItem() + view._id = str(uuid.uuid4()) + return view + + +def make_datasource() -> TSC.DatasourceItem: + datasource = TSC.DatasourceItem("project", "test") + datasource._id = str(uuid.uuid4()) + return datasource + + +def make_table() -> TSC.TableItem: + table = TSC.TableItem("project", "test") + table._id = str(uuid.uuid4()) + return table + + +def make_database() -> TSC.DatabaseItem: + database = TSC.DatabaseItem("project", "test") + database._id = str(uuid.uuid4()) + return database + + +def make_flow() -> TSC.FlowItem: + flow = TSC.FlowItem("project", "test") + flow._id = str(uuid.uuid4()) + return flow + + +def make_vconn() -> TSC.VirtualConnectionItem: + vconn = TSC.VirtualConnectionItem("test") + vconn._id = str(uuid.uuid4()) + return vconn + + +sample_taggable_items = ( + [ + ("workbooks", make_workbook()), + ("workbooks", "some_id"), + ("views", make_view()), + ("views", "some_id"), + ("datasources", make_datasource()), + ("datasources", "some_id"), + ("tables", make_table()), + 
("tables", "some_id"), + ("databases", make_database()), + ("databases", "some_id"), + ("flows", make_flow()), + ("flows", "some_id"), + ("virtual_connections", make_vconn()), + ("virtual_connections", "some_id"), + ], +) + +sample_tags = [ + "a", + ["a", "b"], + ["a", "b", "c", "c"], +] + + +@pytest.mark.parametrize("endpoint_type, item", *sample_taggable_items) +@pytest.mark.parametrize("tags", sample_tags) +def test_add_tags(get_server, endpoint_type, item, tags) -> None: + add_tags_xml = add_tag_xml_response_factory(tags) + endpoint = getattr(get_server, endpoint_type) + id_ = getattr(item, "id", item) + + with requests_mock.mock() as m: + m.put( + f"{endpoint.baseurl}/{id_}/tags", + status_code=200, + text=add_tags_xml, + ) + tag_result = endpoint.add_tags(item, tags) + + if isinstance(tags, str): + tags = [tags] + assert set(tag_result) == set(tags) + + +@pytest.mark.parametrize("endpoint_type, item", *sample_taggable_items) +@pytest.mark.parametrize("tags", sample_tags) +def test_delete_tags(get_server, endpoint_type, item, tags) -> None: + add_tags_xml = add_tag_xml_response_factory(tags) + endpoint = getattr(get_server, endpoint_type) + id_ = getattr(item, "id", item) + + if isinstance(tags, str): + tags = [tags] + tag_paths = "|".join(tags) + tag_paths = f"({tag_paths})" + matcher = re.compile(rf"{endpoint.baseurl}\/{id_}\/tags\/{tag_paths}") + with requests_mock.mock() as m: + m.delete( + matcher, + status_code=200, + text=add_tags_xml, + ) + endpoint.delete_tags(item, tags) + history = m.request_history + + tag_set = set(tags) + assert len(history) == len(tag_set) + urls = {r.url.split("/")[-1] for r in history} + assert urls == tag_set + + +@pytest.mark.parametrize("endpoint_type, item", *sample_taggable_items) +@pytest.mark.parametrize("tags", sample_tags) +def test_update_tags(get_server, endpoint_type, item, tags) -> None: + endpoint = getattr(get_server, endpoint_type) + id_ = getattr(item, "id", item) + tags = set([tags] if isinstance(tags, str) else tags) + with ExitStack() as stack: + if isinstance(item, str): + stack.enter_context(pytest.raises((ValueError, NotImplementedError))) + elif hasattr(item, "_initial_tags"): + initial_tags = {"x", "y", "z"} + item._initial_tags = initial_tags + add_tags_xml = add_tag_xml_response_factory(tags - initial_tags) + delete_tags_xml = add_tag_xml_response_factory(initial_tags - tags) + m = stack.enter_context(requests_mock.mock()) + m.put( + f"{endpoint.baseurl}/{id_}/tags", + status_code=200, + text=add_tags_xml, + ) + + tag_paths = "|".join(initial_tags - tags) + tag_paths = f"({tag_paths})" + matcher = re.compile(rf"{endpoint.baseurl}\/{id_}\/tags\/{tag_paths}") + m.delete( + matcher, + status_code=200, + text=delete_tags_xml, + ) + + else: + stack.enter_context(pytest.raises(NotImplementedError)) + + endpoint.update_tags(item) + + +def test_tags_batch_add(get_server) -> None: + server = get_server + content = [make_workbook(), make_view(), make_datasource(), make_table(), make_database()] + tags = ["a", "b"] + add_tags_xml = batch_add_tags_xml_response_factory(tags, content) + with requests_mock.mock() as m: + m.put( + f"{server.tags.baseurl}:batchCreate", + status_code=200, + text=add_tags_xml, + ) + tag_result = server.tags.batch_add(tags, content) + + assert set(tag_result) == set(tags) + + +def test_tags_batch_delete(get_server) -> None: + server = get_server + content = [make_workbook(), make_view(), make_datasource(), make_table(), make_database()] + tags = ["a", "b"] + add_tags_xml = 
batch_add_tags_xml_response_factory(tags, content) + with requests_mock.mock() as m: + m.put( + f"{server.tags.baseurl}:batchDelete", + status_code=200, + text=add_tags_xml, + ) + tag_result = server.tags.batch_delete(tags, content) + + assert set(tag_result) == set(tags) diff --git a/test/test_task.py b/test/test_task.py index 2529f811a..2d724b879 100644 --- a/test/test_task.py +++ b/test/test_task.py @@ -1,26 +1,38 @@ -import unittest import os +import unittest +from datetime import time +from pathlib import Path + import requests_mock + import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import parse_datetime +from tableauserverclient.models.task_item import TaskItem -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") +TEST_ASSET_DIR = Path(__file__).parent / "assets" GET_XML_NO_WORKBOOK = os.path.join(TEST_ASSET_DIR, "tasks_no_workbook_or_datasource.xml") GET_XML_WITH_WORKBOOK = os.path.join(TEST_ASSET_DIR, "tasks_with_workbook.xml") GET_XML_WITH_DATASOURCE = os.path.join(TEST_ASSET_DIR, "tasks_with_datasource.xml") GET_XML_WITH_WORKBOOK_AND_DATASOURCE = os.path.join(TEST_ASSET_DIR, "tasks_with_workbook_and_datasource.xml") +GET_XML_DATAACCELERATION_TASK = os.path.join(TEST_ASSET_DIR, "tasks_with_dataacceleration_task.xml") +GET_XML_RUN_NOW_RESPONSE = os.path.join(TEST_ASSET_DIR, "tasks_run_now_response.xml") +GET_XML_CREATE_TASK_RESPONSE = os.path.join(TEST_ASSET_DIR, "tasks_create_extract_task.xml") +GET_XML_WITHOUT_SCHEDULE = TEST_ASSET_DIR / "tasks_without_schedule.xml" +GET_XML_WITH_INTERVAL = TEST_ASSET_DIR / "tasks_with_interval.xml" class TaskTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server("http://test") - self.server.version = '2.6' + self.server = TSC.Server("http://test", False) + self.server.version = "3.19" # Fake Signin self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" - self.baseurl = self.server.tasks.baseurl + # default task type is extractRefreshes + self.baseurl = "{}/{}".format(self.server.tasks.baseurl, "extractRefreshes") def test_get_tasks_with_no_workbook(self): with open(GET_XML_NO_WORKBOOK, "rb") as f: @@ -40,8 +52,8 @@ def test_get_tasks_with_workbook(self): all_tasks, pagination_item = self.server.tasks.get() task = all_tasks[0] - self.assertEqual('c7a9327e-1cda-4504-b026-ddb43b976d1d', task.target.id) - self.assertEqual('workbook', task.target.type) + self.assertEqual("c7a9327e-1cda-4504-b026-ddb43b976d1d", task.target.id) + self.assertEqual("workbook", task.target.type) def test_get_tasks_with_datasource(self): with open(GET_XML_WITH_DATASOURCE, "rb") as f: @@ -51,8 +63,8 @@ def test_get_tasks_with_datasource(self): all_tasks, pagination_item = self.server.tasks.get() task = all_tasks[0] - self.assertEqual('c7a9327e-1cda-4504-b026-ddb43b976d1d', task.target.id) - self.assertEqual('datasource', task.target.type) + self.assertEqual("c7a9327e-1cda-4504-b026-ddb43b976d1d", task.target.id) + self.assertEqual("datasource", task.target.type) def test_get_tasks_with_workbook_and_datasource(self): with open(GET_XML_WITH_WORKBOOK_AND_DATASOURCE, "rb") as f: @@ -61,9 +73,9 @@ def test_get_tasks_with_workbook_and_datasource(self): m.get(self.baseurl, text=response_xml) all_tasks, pagination_item = self.server.tasks.get() - self.assertEqual('workbook', all_tasks[0].target.type) - self.assertEqual('datasource', all_tasks[1].target.type) - self.assertEqual('workbook', all_tasks[2].target.type) + self.assertEqual("workbook", 
all_tasks[0].target.type) + self.assertEqual("datasource", all_tasks[1].target.type) + self.assertEqual("workbook", all_tasks[2].target.type) def test_get_task_with_schedule(self): with open(GET_XML_WITH_WORKBOOK, "rb") as f: @@ -73,6 +85,105 @@ def test_get_task_with_schedule(self): all_tasks, pagination_item = self.server.tasks.get() task = all_tasks[0] - self.assertEqual('c7a9327e-1cda-4504-b026-ddb43b976d1d', task.target.id) - self.assertEqual('workbook', task.target.type) - self.assertEqual('b60b4efd-a6f7-4599-beb3-cb677e7abac1', task.schedule_id) + self.assertEqual("c7a9327e-1cda-4504-b026-ddb43b976d1d", task.target.id) + self.assertEqual("workbook", task.target.type) + self.assertEqual("b60b4efd-a6f7-4599-beb3-cb677e7abac1", task.schedule_id) + + def test_get_task_without_schedule(self): + with requests_mock.mock() as m: + m.get(self.baseurl, text=GET_XML_WITHOUT_SCHEDULE.read_text()) + all_tasks, pagination_item = self.server.tasks.get() + + task = all_tasks[0] + self.assertEqual("c7a9327e-1cda-4504-b026-ddb43b976d1d", task.target.id) + self.assertEqual("datasource", task.target.type) + + def test_get_task_with_interval(self): + with requests_mock.mock() as m: + m.get(self.baseurl, text=GET_XML_WITH_INTERVAL.read_text()) + all_tasks, pagination_item = self.server.tasks.get() + + task = all_tasks[0] + self.assertEqual("e4de0575-fcc7-4232-5659-be09bb8e7654", task.target.id) + self.assertEqual("datasource", task.target.type) + + def test_delete(self): + with requests_mock.mock() as m: + m.delete(self.baseurl + "/c7a9327e-1cda-4504-b026-ddb43b976d1d", status_code=204) + self.server.tasks.delete("c7a9327e-1cda-4504-b026-ddb43b976d1d") + + def test_delete_missing_id(self): + self.assertRaises(ValueError, self.server.tasks.delete, "") + + def test_get_materializeviews_tasks(self): + with open(GET_XML_DATAACCELERATION_TASK, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(f"{self.server.tasks.baseurl}/{TaskItem.Type.DataAcceleration}", text=response_xml) + all_tasks, pagination_item = self.server.tasks.get(task_type=TaskItem.Type.DataAcceleration) + + task = all_tasks[0] + self.assertEqual("a462c148-fc40-4670-a8e4-39b7f0c58c7f", task.target.id) + self.assertEqual("workbook", task.target.type) + self.assertEqual("b22190b4-6ac2-4eed-9563-4afc03444413", task.schedule_id) + self.assertEqual(parse_datetime("2019-12-09T22:30:00Z"), task.schedule_item.next_run_at) + self.assertEqual(parse_datetime("2019-12-09T20:45:04Z"), task.last_run_at) + self.assertEqual(TSC.TaskItem.Type.DataAcceleration, task.task_type) + + def test_delete_data_acceleration(self): + with requests_mock.mock() as m: + m.delete( + "{}/{}/{}".format( + self.server.tasks.baseurl, TaskItem.Type.DataAcceleration, "c9cff7f9-309c-4361-99ff-d4ba8c9f5467" + ), + status_code=204, + ) + self.server.tasks.delete("c9cff7f9-309c-4361-99ff-d4ba8c9f5467", TaskItem.Type.DataAcceleration) + + def test_get_by_id(self): + with open(GET_XML_WITH_WORKBOOK, "rb") as f: + response_xml = f.read().decode("utf-8") + task_id = "f84901ac-72ad-4f9b-a87e-7a3500402ad6" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{task_id}", text=response_xml) + task = self.server.tasks.get_by_id(task_id) + + self.assertEqual("c7a9327e-1cda-4504-b026-ddb43b976d1d", task.target.id) + self.assertEqual("workbook", task.target.type) + self.assertEqual("b60b4efd-a6f7-4599-beb3-cb677e7abac1", task.schedule_id) + self.assertEqual(TSC.TaskItem.Type.ExtractRefresh, task.task_type) + + def test_run_now(self): + task_id = 
"f84901ac-72ad-4f9b-a87e-7a3500402ad6" + task = TaskItem(task_id, TaskItem.Type.ExtractRefresh, 100) + with open(GET_XML_RUN_NOW_RESPONSE, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(f"{self.baseurl}/{task_id}/runNow", text=response_xml) + job_response_content = self.server.tasks.run(task).decode("utf-8") + + self.assertTrue("7b6b59a8-ac3c-4d1d-2e9e-0b5b4ba8a7b6" in job_response_content) + self.assertTrue("RefreshExtract" in job_response_content) + + def test_create_extract_task(self): + monthly_interval = TSC.MonthlyInterval(start_time=time(23, 30), interval_value=15) + monthly_schedule = TSC.ScheduleItem( + None, + None, + None, + None, + monthly_interval, + ) + target_item = TSC.Target("workbook_id", "workbook") + + task = TaskItem(None, "FullRefresh", None, schedule_item=monthly_schedule, target=target_item) + + with open(GET_XML_CREATE_TASK_RESPONSE, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(f"{self.baseurl}", text=response_xml) + create_response_content = self.server.tasks.create(task).decode("utf-8") + + self.assertTrue("task_id" in create_response_content) + self.assertTrue("workbook_id" in create_response_content) + self.assertTrue("FullRefresh" in create_response_content) diff --git a/test/test_user.py b/test/test_user.py index 8df2f2b2e..a46624845 100644 --- a/test/test_user.py +++ b/test/test_user.py @@ -1,54 +1,64 @@ -import unittest import os +import unittest + import requests_mock + import tableauserverclient as TSC from tableauserverclient.datetime_helpers import format_datetime -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +GET_XML = os.path.join(TEST_ASSET_DIR, "user_get.xml") +GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, "user_get_empty.xml") +GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "user_get_by_id.xml") +UPDATE_XML = os.path.join(TEST_ASSET_DIR, "user_update.xml") +ADD_XML = os.path.join(TEST_ASSET_DIR, "user_add.xml") +POPULATE_WORKBOOKS_XML = os.path.join(TEST_ASSET_DIR, "user_populate_workbooks.xml") +GET_FAVORITES_XML = os.path.join(TEST_ASSET_DIR, "favorites_get.xml") +POPULATE_GROUPS_XML = os.path.join(TEST_ASSET_DIR, "user_populate_groups.xml") -GET_XML = os.path.join(TEST_ASSET_DIR, 'user_get.xml') -GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, 'user_get_empty.xml') -GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'user_get_by_id.xml') -UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'user_update.xml') -ADD_XML = os.path.join(TEST_ASSET_DIR, 'user_add.xml') -POPULATE_WORKBOOKS_XML = os.path.join(TEST_ASSET_DIR, 'user_populate_workbooks.xml') -ADD_FAVORITE_XML = os.path.join(TEST_ASSET_DIR, 'user_add_favorite.xml') +USERNAMES = os.path.join(TEST_ASSET_DIR, "Data", "usernames.csv") +USERS = os.path.join(TEST_ASSET_DIR, "Data", "user_details.csv") class UserTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) # Fake signin - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.users.baseurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get(self) -> None: + with open(GET_XML, "rb") as 
f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl, text=response_xml) + m.get(self.baseurl + "?fields=_all_", text=response_xml) all_users, pagination_item = self.server.users.get() self.assertEqual(2, pagination_item.total_available) self.assertEqual(2, len(all_users)) - self.assertTrue(any(user.id == 'dd2239f6-ddf1-4107-981a-4cf94e415794' for user in all_users)) - single_user = next(user for user in all_users if user.id == 'dd2239f6-ddf1-4107-981a-4cf94e415794') - self.assertEqual('alice', single_user.name) - self.assertEqual('Publisher', single_user.site_role) - self.assertEqual('2016-08-16T23:17:06Z', format_datetime(single_user.last_login)) - - self.assertTrue(any(user.id == '2a47bbf8-8900-4ebb-b0a4-2723bd7c46c3' for user in all_users)) - single_user = next(user for user in all_users if user.id == '2a47bbf8-8900-4ebb-b0a4-2723bd7c46c3') - self.assertEqual('Bob', single_user.name) - self.assertEqual('Interactor', single_user.site_role) - - def test_get_empty(self): - with open(GET_EMPTY_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + self.assertTrue(any(user.id == "dd2239f6-ddf1-4107-981a-4cf94e415794" for user in all_users)) + single_user = next(user for user in all_users if user.id == "dd2239f6-ddf1-4107-981a-4cf94e415794") + self.assertEqual("alice", single_user.name) + self.assertEqual("Publisher", single_user.site_role) + self.assertEqual("2016-08-16T23:17:06Z", format_datetime(single_user.last_login)) + self.assertEqual("alice cook", single_user.fullname) + self.assertEqual("alicecook@test.com", single_user.email) + + self.assertTrue(any(user.id == "2a47bbf8-8900-4ebb-b0a4-2723bd7c46c3" for user in all_users)) + single_user = next(user for user in all_users if user.id == "2a47bbf8-8900-4ebb-b0a4-2723bd7c46c3") + self.assertEqual("Bob", single_user.name) + self.assertEqual("Interactor", single_user.site_role) + self.assertEqual("Bob Smith", single_user.fullname) + self.assertEqual("bob@test.com", single_user.email) + + def test_get_empty(self) -> None: + with open(GET_EMPTY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_users, pagination_item = self.server.users.get() @@ -56,93 +66,170 @@ def test_get_empty(self): self.assertEqual(0, pagination_item.total_available) self.assertEqual([], all_users) - def test_get_before_signin(self): + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.users.get) - def test_get_by_id(self): - with open(GET_BY_ID_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get_by_id(self) -> None: + with open(GET_BY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/dd2239f6-ddf1-4107-981a-4cf94e415794', text=response_xml) - single_user = self.server.users.get_by_id('dd2239f6-ddf1-4107-981a-4cf94e415794') - - self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', single_user.id) - self.assertEqual('alice', single_user.name) - self.assertEqual('Alice', single_user.fullname) - self.assertEqual('Publisher', single_user.site_role) - self.assertEqual('ServerDefault', single_user.auth_setting) - self.assertEqual('2016-08-16T23:17:06Z', format_datetime(single_user.last_login)) - self.assertEqual('local', single_user.domain_name) - - def test_get_by_id_missing_id(self): - self.assertRaises(ValueError, self.server.users.get_by_id, '') - - def test_update(self): - 
with open(UPDATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + m.get(self.baseurl + "/dd2239f6-ddf1-4107-981a-4cf94e415794", text=response_xml) + single_user = self.server.users.get_by_id("dd2239f6-ddf1-4107-981a-4cf94e415794") + + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", single_user.id) + self.assertEqual("alice", single_user.name) + self.assertEqual("Alice", single_user.fullname) + self.assertEqual("Publisher", single_user.site_role) + self.assertEqual("ServerDefault", single_user.auth_setting) + self.assertEqual("2016-08-16T23:17:06Z", format_datetime(single_user.last_login)) + self.assertEqual("local", single_user.domain_name) + + def test_get_by_id_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.users.get_by_id, "") + + def test_update(self) -> None: + with open(UPDATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/dd2239f6-ddf1-4107-981a-4cf94e415794', text=response_xml) - single_user = TSC.UserItem('test', 'Viewer') - single_user._id = 'dd2239f6-ddf1-4107-981a-4cf94e415794' - single_user.name = 'Cassie' - single_user.fullname = 'Cassie' - single_user.email = 'cassie@email.com' + m.put(self.baseurl + "/dd2239f6-ddf1-4107-981a-4cf94e415794", text=response_xml) + single_user = TSC.UserItem("test", "Viewer") + single_user._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_user.name = "Cassie" + single_user.fullname = "Cassie" + single_user.email = "cassie@email.com" single_user = self.server.users.update(single_user) - self.assertEqual('Cassie', single_user.name) - self.assertEqual('Cassie', single_user.fullname) - self.assertEqual('cassie@email.com', single_user.email) - self.assertEqual('Viewer', single_user.site_role) + self.assertEqual("Cassie", single_user.name) + self.assertEqual("Cassie", single_user.fullname) + self.assertEqual("cassie@email.com", single_user.email) + self.assertEqual("Viewer", single_user.site_role) - def test_update_missing_id(self): - single_user = TSC.UserItem('test', 'Interactor') + def test_update_missing_id(self) -> None: + single_user = TSC.UserItem("test", "Interactor") self.assertRaises(TSC.MissingRequiredFieldError, self.server.users.update, single_user) - def test_remove(self): + def test_remove(self) -> None: with requests_mock.mock() as m: - m.delete(self.baseurl + '/dd2239f6-ddf1-4107-981a-4cf94e415794', status_code=204) - self.server.users.remove('dd2239f6-ddf1-4107-981a-4cf94e415794') + m.delete(self.baseurl + "/dd2239f6-ddf1-4107-981a-4cf94e415794", status_code=204) + self.server.users.remove("dd2239f6-ddf1-4107-981a-4cf94e415794") - def test_remove_missing_id(self): - self.assertRaises(ValueError, self.server.users.remove, '') - - def test_add(self): - with open(ADD_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_remove_with_replacement(self) -> None: + with requests_mock.mock() as m: + m.delete( + self.baseurl + + "/dd2239f6-ddf1-4107-981a-4cf94e415794" + + "?mapAssetsTo=4cc4c17f-898a-4de4-abed-a1681c673ced", + status_code=204, + ) + self.server.users.remove("dd2239f6-ddf1-4107-981a-4cf94e415794", "4cc4c17f-898a-4de4-abed-a1681c673ced") + + def test_remove_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.users.remove, "") + + def test_add(self) -> None: + with open(ADD_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.post(self.baseurl + '', text=response_xml) - new_user = TSC.UserItem(name='Cassie', site_role='Viewer', 
auth_setting='ServerDefault') + m.post(self.baseurl + "", text=response_xml) + new_user = TSC.UserItem(name="Cassie", site_role="Viewer", auth_setting="ServerDefault") new_user = self.server.users.add(new_user) - self.assertEqual('4cc4c17f-898a-4de4-abed-a1681c673ced', new_user.id) - self.assertEqual('Cassie', new_user.name) - self.assertEqual('Viewer', new_user.site_role) - self.assertEqual('ServerDefault', new_user.auth_setting) + self.assertEqual("4cc4c17f-898a-4de4-abed-a1681c673ced", new_user.id) + self.assertEqual("Cassie", new_user.name) + self.assertEqual("Viewer", new_user.site_role) + self.assertEqual("ServerDefault", new_user.auth_setting) - def test_populate_workbooks(self): - with open(POPULATE_WORKBOOKS_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_populate_workbooks(self) -> None: + with open(POPULATE_WORKBOOKS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/dd2239f6-ddf1-4107-981a-4cf94e415794/workbooks', - text=response_xml) - single_user = TSC.UserItem('test', 'Interactor') - single_user._id = 'dd2239f6-ddf1-4107-981a-4cf94e415794' + m.get(self.baseurl + "/dd2239f6-ddf1-4107-981a-4cf94e415794/workbooks", text=response_xml) + single_user = TSC.UserItem("test", "Interactor") + single_user._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" self.server.users.populate_workbooks(single_user) workbook_list = list(single_user.workbooks) - self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', workbook_list[0].id) - self.assertEqual('SafariSample', workbook_list[0].name) - self.assertEqual('SafariSample', workbook_list[0].content_url) + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", workbook_list[0].id) + self.assertEqual("SafariSample", workbook_list[0].name) + self.assertEqual("SafariSample", workbook_list[0].content_url) self.assertEqual(False, workbook_list[0].show_tabs) self.assertEqual(26, workbook_list[0].size) - self.assertEqual('2016-07-26T20:34:56Z', format_datetime(workbook_list[0].created_at)) - self.assertEqual('2016-07-26T20:35:05Z', format_datetime(workbook_list[0].updated_at)) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', workbook_list[0].project_id) - self.assertEqual('default', workbook_list[0].project_name) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', workbook_list[0].owner_id) - self.assertEqual(set(['Safari', 'Sample']), workbook_list[0].tags) - - def test_populate_workbooks_missing_id(self): - single_user = TSC.UserItem('test', 'Interactor') + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(workbook_list[0].created_at)) + self.assertEqual("2016-07-26T20:35:05Z", format_datetime(workbook_list[0].updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", workbook_list[0].project_id) + self.assertEqual("default", workbook_list[0].project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", workbook_list[0].owner_id) + self.assertEqual({"Safari", "Sample"}, workbook_list[0].tags) + + def test_populate_workbooks_missing_id(self) -> None: + single_user = TSC.UserItem("test", "Interactor") self.assertRaises(TSC.MissingRequiredFieldError, self.server.users.populate_workbooks, single_user) + + def test_populate_favorites(self) -> None: + self.server.version = "2.5" + baseurl = self.server.favorites.baseurl + single_user = TSC.UserItem("test", "Interactor") + with open(GET_FAVORITES_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(f"{baseurl}/{single_user.id}", 
text=response_xml) + self.server.users.populate_favorites(single_user) + self.assertIsNotNone(single_user._favorites) + self.assertEqual(len(single_user.favorites["workbooks"]), 1) + self.assertEqual(len(single_user.favorites["views"]), 1) + self.assertEqual(len(single_user.favorites["projects"]), 1) + self.assertEqual(len(single_user.favorites["datasources"]), 1) + + workbook = single_user.favorites["workbooks"][0] + view = single_user.favorites["views"][0] + datasource = single_user.favorites["datasources"][0] + project = single_user.favorites["projects"][0] + + self.assertEqual(workbook.id, "6d13b0ca-043d-4d42-8c9d-3f3313ea3a00") + self.assertEqual(view.id, "d79634e1-6063-4ec9-95ff-50acbf609ff5") + self.assertEqual(datasource.id, "e76a1461-3b1d-4588-bf1b-17551a879ad9") + self.assertEqual(project.id, "1d0304cd-3796-429f-b815-7258370b9b74") + + def test_populate_groups(self) -> None: + self.server.version = "3.7" + with open(POPULATE_GROUPS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.server.users.baseurl + "/dd2239f6-ddf1-4107-981a-4cf94e415794/groups", text=response_xml) + single_user = TSC.UserItem("test", "Interactor") + single_user._id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + self.server.users.populate_groups(single_user) + + group_list = list(single_user.groups) + + self.assertEqual(3, len(group_list)) + self.assertEqual("ef8b19c0-43b6-11e6-af50-63f5805dbe3c", group_list[0].id) + self.assertEqual("All Users", group_list[0].name) + self.assertEqual("local", group_list[0].domain_name) + + self.assertEqual("e7833b48-c6f7-47b5-a2a7-36e7dd232758", group_list[1].id) + self.assertEqual("Another group", group_list[1].name) + self.assertEqual("local", group_list[1].domain_name) + + self.assertEqual("86a66d40-f289-472a-83d0-927b0f954dc8", group_list[2].id) + self.assertEqual("TableauExample", group_list[2].name) + self.assertEqual("local", group_list[2].domain_name) + + def test_get_usernames_from_file(self): + with open(ADD_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.server.users.baseurl, text=response_xml) + user_list, failures = self.server.users.create_from_file(USERNAMES) + assert user_list[0].name == "Cassie", user_list + assert failures == [], failures + + def test_get_users_from_file(self): + with open(ADD_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.server.users.baseurl, text=response_xml) + users, failures = self.server.users.create_from_file(USERS) + assert users[0].name == "Cassie", users + assert failures == [] diff --git a/test/test_user_model.py b/test/test_user_model.py index 5826fb148..a8a2c51cb 100644 --- a/test/test_user_model.py +++ b/test/test_user_model.py @@ -1,18 +1,14 @@ +import logging import unittest -import tableauserverclient as TSC +from unittest.mock import * +import io +import pytest -class UserModelTests(unittest.TestCase): - def test_invalid_name(self): - self.assertRaises(ValueError, TSC.UserItem, None, TSC.UserItem.Roles.Publisher) - self.assertRaises(ValueError, TSC.UserItem, "", TSC.UserItem.Roles.Publisher) - user = TSC.UserItem("me", TSC.UserItem.Roles.Publisher) - with self.assertRaises(ValueError): - user.name = None +import tableauserverclient as TSC - with self.assertRaises(ValueError): - user.name = "" +class UserModelTests(unittest.TestCase): def test_invalid_auth_setting(self): user = TSC.UserItem("me", TSC.UserItem.Roles.Publisher) with 
self.assertRaises(ValueError): @@ -22,3 +18,110 @@ def test_invalid_site_role(self): user = TSC.UserItem("me", TSC.UserItem.Roles.Publisher) with self.assertRaises(ValueError): user.site_role = "Hello" + + +class UserDataTest(unittest.TestCase): + logger = logging.getLogger("UserDataTest") + + role_inputs = [ + ["creator", "system", "yes", "SiteAdministrator"], + ["None", "system", "no", "SiteAdministrator"], + ["explorer", "SysTEm", "no", "SiteAdministrator"], + ["creator", "site", "yes", "SiteAdministratorCreator"], + ["explorer", "site", "yes", "SiteAdministratorExplorer"], + ["creator", "SITE", "no", "SiteAdministratorCreator"], + ["creator", "none", "yes", "Creator"], + ["explorer", "none", "yes", "ExplorerCanPublish"], + ["viewer", "None", "no", "Viewer"], + ["explorer", "no", "yes", "ExplorerCanPublish"], + ["EXPLORER", "noNO", "yes", "ExplorerCanPublish"], + ["explorer", "no", "no", "Explorer"], + ["unlicensed", "none", "no", "Unlicensed"], + ["Chef", "none", "yes", "Unlicensed"], + ["yes", "yes", "yes", "Unlicensed"], + ] + + valid_import_content = [ + "username, pword, fname, creator, site, yes, email", + "username, pword, fname, explorer, none, no, email", + "", + "u", + "p", + ] + + valid_username_content = ["jfitzgerald@tableau.com"] + + usernames = [ + "valid", + "valid@email.com", + "domain/valid", + "domain/valid@tmail.com", + "va!@#$%^&*()lid", + "in@v@lid", + "in valid", + "", + ] + + def test_validate_usernames(self): + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[0]) + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[1]) + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[2]) + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[3]) + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[4]) + with self.assertRaises(AttributeError): + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[5]) + with self.assertRaises(AttributeError): + TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[6]) + + def test_evaluate_role(self): + for line in UserDataTest.role_inputs: + actual = TSC.UserItem.CSVImport._evaluate_site_role(line[0], line[1], line[2]) + assert actual == line[3], line + [actual] + + def test_get_user_detail_empty_line(self): + test_line = "" + test_user = TSC.UserItem.CSVImport.create_user_from_line(test_line) + assert test_user is None + + def test_get_user_detail_standard(self): + test_line = "username, pword, fname, license, admin, pub, email" + test_user: TSC.UserItem = TSC.UserItem.CSVImport.create_user_from_line(test_line) + assert test_user.name == "username", test_user.name + assert test_user.fullname == "fname", test_user.fullname + assert test_user.site_role == "Unlicensed", test_user.site_role + assert test_user.email == "email", test_user.email + + def test_get_user_details_only_username(self): + test_line = "username" + test_user: TSC.UserItem = TSC.UserItem.CSVImport.create_user_from_line(test_line) + + def test_populate_user_details_only_some(self): + values = "username, , , creator, admin" + user = TSC.UserItem.CSVImport.create_user_from_line(values) + assert user.name == "username" + + def test_validate_user_detail_standard(self): + test_line = "username, pword, fname, creator, site, 1, email" + TSC.UserItem.CSVImport._validate_import_line_or_throw(test_line, UserDataTest.logger) + TSC.UserItem.CSVImport.create_user_from_line(test_line) + + # for file handling + def _mock_file_content(self, content: list[str]) -> io.TextIOWrapper: + # the empty string represents 
EOF + # the tests run through the file twice, first to validate then to fetch + mock = MagicMock(io.TextIOWrapper) + content.append("") # EOF + mock.readline.side_effect = content + mock.name = "file-mock" + return mock + + def test_validate_import_file(self): + test_data = self._mock_file_content(UserDataTest.valid_import_content) + valid, invalid = TSC.UserItem.CSVImport.validate_file_for_import(test_data, UserDataTest.logger) + assert valid == 2, f"Expected two lines to be parsed, got {valid}" + assert invalid == [], f"Expected no failures, got {invalid}" + + def test_validate_usernames_file(self): + test_data = self._mock_file_content(UserDataTest.usernames) + valid, invalid = TSC.UserItem.CSVImport.validate_file_for_import(test_data, UserDataTest.logger) + assert valid == 5, f"Exactly 5 of the lines were valid, counted {valid + invalid}" diff --git a/test/test_view.py b/test/test_view.py index 292f86887..a89a6d235 100644 --- a/test/test_view.py +++ b/test/test_view.py @@ -1,73 +1,137 @@ -import unittest import os +import unittest + import requests_mock + import tableauserverclient as TSC +from tableauserverclient import UserItem, GroupItem, PermissionsRule +from tableauserverclient.datetime_helpers import format_datetime -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") -ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, 'view_add_tags.xml') -GET_XML = os.path.join(TEST_ASSET_DIR, 'view_get.xml') -GET_XML_USAGE = os.path.join(TEST_ASSET_DIR, 'view_get_usage.xml') -POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, 'Sample View Image.png') -POPULATE_PDF = os.path.join(TEST_ASSET_DIR, 'populate_pdf.pdf') -POPULATE_CSV = os.path.join(TEST_ASSET_DIR, 'populate_csv.csv') -UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_update.xml') +ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, "view_add_tags.xml") +GET_XML = os.path.join(TEST_ASSET_DIR, "view_get.xml") +GET_XML_ID = os.path.join(TEST_ASSET_DIR, "view_get_id.xml") +GET_XML_USAGE = os.path.join(TEST_ASSET_DIR, "view_get_usage.xml") +GET_XML_ID_USAGE = os.path.join(TEST_ASSET_DIR, "view_get_id_usage.xml") +POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, "Sample View Image.png") +POPULATE_PDF = os.path.join(TEST_ASSET_DIR, "populate_pdf.pdf") +POPULATE_CSV = os.path.join(TEST_ASSET_DIR, "populate_csv.csv") +POPULATE_EXCEL = os.path.join(TEST_ASSET_DIR, "populate_excel.xlsx") +POPULATE_PERMISSIONS_XML = os.path.join(TEST_ASSET_DIR, "view_populate_permissions.xml") +UPDATE_PERMISSIONS = os.path.join(TEST_ASSET_DIR, "view_update_permissions.xml") +UPDATE_XML = os.path.join(TEST_ASSET_DIR, "workbook_update.xml") class ViewTests(unittest.TestCase): def setUp(self): - self.server = TSC.Server('http://test') - self.server.version = '2.7' + self.server = TSC.Server("http://test", False) + self.server.version = "3.2" # Fake sign in - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.views.baseurl self.siteurl = self.server.views.siteurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_views, pagination_item = 
self.server.views.get() self.assertEqual(2, pagination_item.total_available) - self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', all_views[0].id) - self.assertEqual('ENDANGERED SAFARI', all_views[0].name) - self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', all_views[0].content_url) - self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', all_views[0].workbook_id) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_views[0].owner_id) - self.assertEqual('5241e88d-d384-4fd7-9c2f-648b5247efc5', all_views[0].project_id) - - self.assertEqual('fd252f73-593c-4c4e-8584-c032b8022adc', all_views[1].id) - self.assertEqual('Overview', all_views[1].name) - self.assertEqual('Superstore/sheets/Overview', all_views[1].content_url) - self.assertEqual('6d13b0ca-043d-4d42-8c9d-3f3313ea3a00', all_views[1].workbook_id) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_views[1].owner_id) - self.assertEqual('5b534f74-3226-11e8-b47a-cb2e00f738a3', all_views[1].project_id) - - def test_get_with_usage(self): - with open(GET_XML_USAGE, 'rb') as f: - response_xml = f.read().decode('utf-8') + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", all_views[0].id) + self.assertEqual("ENDANGERED SAFARI", all_views[0].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", all_views[0].content_url) + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", all_views[0].workbook_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_views[0].owner_id) + self.assertEqual("5241e88d-d384-4fd7-9c2f-648b5247efc5", all_views[0].project_id) + self.assertEqual({"tag1", "tag2"}, all_views[0].tags) + self.assertIsNone(all_views[0].created_at) + self.assertIsNone(all_views[0].updated_at) + self.assertIsNone(all_views[0].sheet_type) + + self.assertEqual("fd252f73-593c-4c4e-8584-c032b8022adc", all_views[1].id) + self.assertEqual("Overview", all_views[1].name) + self.assertEqual("Superstore/sheets/Overview", all_views[1].content_url) + self.assertEqual("6d13b0ca-043d-4d42-8c9d-3f3313ea3a00", all_views[1].workbook_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_views[1].owner_id) + self.assertEqual("5b534f74-3226-11e8-b47a-cb2e00f738a3", all_views[1].project_id) + self.assertEqual("2002-05-30T09:00:00Z", format_datetime(all_views[1].created_at)) + self.assertEqual("2002-06-05T08:00:59Z", format_datetime(all_views[1].updated_at)) + self.assertEqual("story", all_views[1].sheet_type) + + def test_get_by_id(self) -> None: + with open(GET_XML_ID, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5", text=response_xml) + view = self.server.views.get_by_id("d79634e1-6063-4ec9-95ff-50acbf609ff5") + + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", view.id) + self.assertEqual("ENDANGERED SAFARI", view.name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", view.content_url) + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", view.workbook_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", view.owner_id) + self.assertEqual("5241e88d-d384-4fd7-9c2f-648b5247efc5", view.project_id) + self.assertEqual({"tag1", "tag2"}, view.tags) + self.assertEqual("2002-05-30T09:00:00Z", format_datetime(view.created_at)) + self.assertEqual("2002-06-05T08:00:59Z", format_datetime(view.updated_at)) + self.assertEqual("story", view.sheet_type) + + def test_get_by_id_usage(self) -> None: + with open(GET_XML_ID_USAGE, "rb") as f: + response_xml = 
f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5?includeUsageStatistics=true", text=response_xml) + view = self.server.views.get_by_id("d79634e1-6063-4ec9-95ff-50acbf609ff5", usage=True) + + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", view.id) + self.assertEqual("ENDANGERED SAFARI", view.name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", view.content_url) + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", view.workbook_id) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", view.owner_id) + self.assertEqual("5241e88d-d384-4fd7-9c2f-648b5247efc5", view.project_id) + self.assertEqual({"tag1", "tag2"}, view.tags) + self.assertEqual("2002-05-30T09:00:00Z", format_datetime(view.created_at)) + self.assertEqual("2002-06-05T08:00:59Z", format_datetime(view.updated_at)) + self.assertEqual("story", view.sheet_type) + self.assertEqual(7, view.total_views) + + def test_get_by_id_missing_id(self) -> None: + self.assertRaises(TSC.MissingRequiredFieldError, self.server.views.get_by_id, None) + + def test_get_with_usage(self) -> None: + with open(GET_XML_USAGE, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl + "?includeUsageStatistics=true", text=response_xml) all_views, pagination_item = self.server.views.get(usage=True) - self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', all_views[0].id) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", all_views[0].id) self.assertEqual(7, all_views[0].total_views) - self.assertEqual('fd252f73-593c-4c4e-8584-c032b8022adc', all_views[1].id) + self.assertIsNone(all_views[0].created_at) + self.assertIsNone(all_views[0].updated_at) + self.assertIsNone(all_views[0].sheet_type) + + self.assertEqual("fd252f73-593c-4c4e-8584-c032b8022adc", all_views[1].id) self.assertEqual(13, all_views[1].total_views) + self.assertEqual("2002-05-30T09:00:00Z", format_datetime(all_views[1].created_at)) + self.assertEqual("2002-06-05T08:00:59Z", format_datetime(all_views[1].updated_at)) + self.assertEqual("story", all_views[1].sheet_type) - def test_get_with_usage_and_filter(self): - with open(GET_XML_USAGE, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get_with_usage_and_filter(self) -> None: + with open(GET_XML_USAGE, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl + "?includeUsageStatistics=true&filter=name:in:[foo,bar]", text=response_xml) options = TSC.RequestOptions() - options.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.In, - ["foo", "bar"])) + options.filter.add( + TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.In, ["foo", "bar"]) + ) all_views, pagination_item = self.server.views.get(req_options=options, usage=True) self.assertEqual("ENDANGERED SAFARI", all_views[0].name) @@ -75,100 +139,228 @@ def test_get_with_usage_and_filter(self): self.assertEqual("Overview", all_views[1].name) self.assertEqual(13, all_views[1].total_views) - def test_get_before_signin(self): + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.views.get) - def test_populate_preview_image(self): - with open(POPULATE_PREVIEW_IMAGE, 'rb') as f: + def test_populate_preview_image(self) -> None: + with open(POPULATE_PREVIEW_IMAGE, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.siteurl + 
'/workbooks/3cc6cd06-89ce-4fdc-b935-5294135d6d42/' - 'views/d79634e1-6063-4ec9-95ff-50acbf609ff5/previewImage', content=response) + m.get( + self.siteurl + "/workbooks/3cc6cd06-89ce-4fdc-b935-5294135d6d42/" + "views/d79634e1-6063-4ec9-95ff-50acbf609ff5/previewImage", + content=response, + ) single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' - single_view._workbook_id = '3cc6cd06-89ce-4fdc-b935-5294135d6d42' + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + single_view._workbook_id = "3cc6cd06-89ce-4fdc-b935-5294135d6d42" self.server.views.populate_preview_image(single_view) self.assertEqual(response, single_view.preview_image) - def test_populate_preview_image_missing_id(self): + def test_populate_preview_image_missing_id(self) -> None: single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" self.assertRaises(TSC.MissingRequiredFieldError, self.server.views.populate_preview_image, single_view) single_view._id = None - single_view._workbook_id = '3cc6cd06-89ce-4fdc-b935-5294135d6d42' + single_view._workbook_id = "3cc6cd06-89ce-4fdc-b935-5294135d6d42" self.assertRaises(TSC.MissingRequiredFieldError, self.server.views.populate_preview_image, single_view) - def test_populate_image(self): - with open(POPULATE_PREVIEW_IMAGE, 'rb') as f: + def test_populate_image(self) -> None: + with open(POPULATE_PREVIEW_IMAGE, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/image', content=response) + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/image", content=response) single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" self.server.views.populate_image(single_view) self.assertEqual(response, single_view.image) - def test_populate_image_high_resolution(self): - with open(POPULATE_PREVIEW_IMAGE, 'rb') as f: + def test_populate_image_with_options(self) -> None: + with open(POPULATE_PREVIEW_IMAGE, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/image?resolution=high', content=response) + m.get( + self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/image?resolution=high&maxAge=10", content=response + ) single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' - req_option = TSC.ImageRequestOptions(imageresolution=TSC.ImageRequestOptions.Resolution.High) + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + req_option = TSC.ImageRequestOptions(imageresolution=TSC.ImageRequestOptions.Resolution.High, maxage=10) self.server.views.populate_image(single_view, req_option) self.assertEqual(response, single_view.image) - def test_populate_pdf(self): - with open(POPULATE_PDF, 'rb') as f: + def test_populate_pdf(self) -> None: + with open(POPULATE_PDF, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/pdf?type=letter&orientation=portrait', - content=response) + m.get( + self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/pdf?type=letter&orientation=portrait&maxAge=5", + content=response, + ) single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" size = TSC.PDFRequestOptions.PageType.Letter orientation = 
TSC.PDFRequestOptions.Orientation.Portrait - req_option = TSC.PDFRequestOptions(size, orientation) + req_option = TSC.PDFRequestOptions(size, orientation, 5) self.server.views.populate_pdf(single_view, req_option) self.assertEqual(response, single_view.pdf) - def test_populate_csv(self): - with open(POPULATE_CSV, 'rb') as f: + def test_populate_csv(self) -> None: + with open(POPULATE_CSV, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/data?maxAge=1", content=response) + single_view = TSC.ViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + request_option = TSC.CSVRequestOptions(maxage=1) + self.server.views.populate_csv(single_view, request_option) + + csv_file = b"".join(single_view.csv) + self.assertEqual(response, csv_file) + + def test_populate_csv_default_maxage(self) -> None: + with open(POPULATE_CSV, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/data', content=response) + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/data", content=response) single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" self.server.views.populate_csv(single_view) csv_file = b"".join(single_view.csv) self.assertEqual(response, csv_file) - def test_populate_image_missing_id(self): + def test_populate_image_missing_id(self) -> None: single_view = TSC.ViewItem() single_view._id = None self.assertRaises(TSC.MissingRequiredFieldError, self.server.views.populate_image, single_view) - def test_update_tags(self): - with open(ADD_TAGS_XML, 'rb') as f: - add_tags_xml = f.read().decode('utf-8') - with open(UPDATE_XML, 'rb') as f: - update_xml = f.read().decode('utf-8') + def test_populate_permissions(self) -> None: + with open(POPULATE_PERMISSIONS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.put(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/tags', text=add_tags_xml) - m.delete(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/tags/b', status_code=204) - m.delete(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5/tags/d', status_code=204) - m.put(self.baseurl + '/d79634e1-6063-4ec9-95ff-50acbf609ff5', text=update_xml) + m.get(self.baseurl + "/e490bec4-2652-4fda-8c4e-f087db6fa328/permissions", text=response_xml) single_view = TSC.ViewItem() - single_view._id = 'd79634e1-6063-4ec9-95ff-50acbf609ff5' - single_view._initial_tags.update(['a', 'b', 'c', 'd']) - single_view.tags.update(['a', 'c', 'e']) + single_view._id = "e490bec4-2652-4fda-8c4e-f087db6fa328" + + self.server.views.populate_permissions(single_view) + permissions = single_view.permissions + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "c8f2773a-c83a-11e8-8c8f-33e6d787b506") + self.assertDictEqual( + permissions[0].capabilities, + { + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow, + }, + ) + + def test_add_permissions(self) -> None: + with open(UPDATE_PERMISSIONS, "rb") as f: + response_xml = f.read().decode("utf-8") + + single_view = TSC.ViewItem() + single_view._id = 
"21778de4-b7b9-44bc-a599-1506a2639ace" + + bob = UserItem.as_reference("7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + group_of_people = GroupItem.as_reference("5e5e1978-71fa-11e4-87dd-7382f5c437af") + + new_permissions = [PermissionsRule(bob, {"Write": "Allow"}), PermissionsRule(group_of_people, {"Read": "Deny"})] + + with requests_mock.mock() as m: + m.put(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml) + permissions = self.server.views.update_permissions(single_view, new_permissions) + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "5e5e1978-71fa-11e4-87dd-7382f5c437af") + self.assertDictEqual(permissions[0].capabilities, {TSC.Permission.Capability.Read: TSC.Permission.Mode.Deny}) + + self.assertEqual(permissions[1].grantee.tag_name, "user") + self.assertEqual(permissions[1].grantee.id, "7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + self.assertDictEqual(permissions[1].capabilities, {TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow}) + + def test_update_tags(self) -> None: + with open(ADD_TAGS_XML, "rb") as f: + add_tags_xml = f.read().decode("utf-8") + with open(UPDATE_XML, "rb") as f: + update_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/tags", text=add_tags_xml) + m.delete(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/tags/b", status_code=204) + m.delete(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/tags/d", status_code=204) + m.put(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5", text=update_xml) + single_view = TSC.ViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + single_view._initial_tags.update(["a", "b", "c", "d"]) + single_view.tags.update(["a", "c", "e"]) updated_view = self.server.views.update(single_view) self.assertEqual(single_view.tags, updated_view.tags) self.assertEqual(single_view._initial_tags, updated_view._initial_tags) + + def test_populate_excel(self) -> None: + self.server.version = "3.8" + self.baseurl = self.server.views.baseurl + with open(POPULATE_EXCEL, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/crosstab/excel?maxAge=1", content=response) + single_view = TSC.ViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + request_option = TSC.ExcelRequestOptions(maxage=1) + self.server.views.populate_excel(single_view, request_option) + + excel_file = b"".join(single_view.excel) + self.assertEqual(response, excel_file) + + def test_filter_excel(self) -> None: + self.server.version = "3.8" + self.baseurl = self.server.views.baseurl + with open(POPULATE_EXCEL, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get(self.baseurl + "/d79634e1-6063-4ec9-95ff-50acbf609ff5/crosstab/excel?maxAge=1", content=response) + single_view = TSC.ViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + request_option = TSC.ExcelRequestOptions(maxage=1) + request_option.vf("stuff", "1") + self.server.views.populate_excel(single_view, request_option) + + excel_file = b"".join(single_view.excel) + self.assertEqual(response, excel_file) + + def test_pdf_height(self) -> None: + self.server.version = "3.8" + self.baseurl = self.server.views.baseurl + with open(POPULATE_PDF, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get( + self.baseurl + 
"/d79634e1-6063-4ec9-95ff-50acbf609ff5/pdf?vizHeight=1080&vizWidth=1920", + content=response, + ) + single_view = TSC.ViewItem() + single_view._id = "d79634e1-6063-4ec9-95ff-50acbf609ff5" + + req_option = TSC.PDFRequestOptions( + viz_height=1080, + viz_width=1920, + ) + + self.server.views.populate_pdf(single_view, req_option) + self.assertEqual(response, single_view.pdf) + + def test_pdf_errors(self) -> None: + req_option = TSC.PDFRequestOptions(viz_height=1080) + with self.assertRaises(ValueError): + req_option.get_query_params() + req_option = TSC.PDFRequestOptions(viz_width=1920) + with self.assertRaises(ValueError): + req_option.get_query_params() diff --git a/test/test_view_acceleration.py b/test/test_view_acceleration.py new file mode 100644 index 000000000..766831b0a --- /dev/null +++ b/test/test_view_acceleration.py @@ -0,0 +1,119 @@ +import os +import requests_mock +import unittest + +import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import format_datetime + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +GET_BY_ID_ACCELERATION_STATUS_XML = os.path.join(TEST_ASSET_DIR, "workbook_get_by_id_acceleration_status.xml") +POPULATE_VIEWS_XML = os.path.join(TEST_ASSET_DIR, "workbook_populate_views.xml") +UPDATE_VIEWS_ACCELERATION_STATUS_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_views_acceleration_status.xml") +UPDATE_WORKBOOK_ACCELERATION_STATUS_XML = os.path.join(TEST_ASSET_DIR, "workbook_update_acceleration_status.xml") + + +class WorkbookTests(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + + # Fake sign in + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.workbooks.baseurl + + def test_get_by_id(self) -> None: + with open(GET_BY_ID_ACCELERATION_STATUS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42", text=response_xml) + single_workbook = self.server.workbooks.get_by_id("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", single_workbook.id) + self.assertEqual("SafariSample", single_workbook.name) + self.assertEqual("SafariSample", single_workbook.content_url) + self.assertEqual("http://tableauserver/#/workbooks/2/views", single_workbook.webpage_url) + self.assertEqual(False, single_workbook.show_tabs) + self.assertEqual(26, single_workbook.size) + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(single_workbook.created_at)) + self.assertEqual("description for SafariSample", single_workbook.description) + self.assertEqual("2016-07-26T20:35:05Z", format_datetime(single_workbook.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", single_workbook.project_id) + self.assertEqual("default", single_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", single_workbook.owner_id) + self.assertEqual({"Safari", "Sample"}, single_workbook.tags) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", single_workbook.views[0].id) + self.assertEqual("ENDANGERED SAFARI", single_workbook.views[0].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", single_workbook.views[0].content_url) + self.assertEqual(True, single_workbook.views[0].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Enabled", 
single_workbook.views[0].data_acceleration_config["acceleration_status"]) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff9", single_workbook.views[1].id) + self.assertEqual("ENDANGERED SAFARI 2", single_workbook.views[1].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI2", single_workbook.views[1].content_url) + self.assertEqual(False, single_workbook.views[1].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Suspended", single_workbook.views[1].data_acceleration_config["acceleration_status"]) + + def test_update_workbook_acceleration(self) -> None: + with open(UPDATE_WORKBOOK_ACCELERATION_STATUS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_acceleration_config = { + "acceleration_enabled": True, + "accelerate_now": False, + "last_updated_at": None, + "acceleration_status": None, + } + # update with parameter includeViewAccelerationStatus=True + single_workbook = self.server.workbooks.update(single_workbook, True) + + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) + self.assertEqual("1d0304cd-3796-429f-b815-7258370b9b74", single_workbook.project_id) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", single_workbook.views[0].content_url) + self.assertEqual(True, single_workbook.views[0].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Pending", single_workbook.views[0].data_acceleration_config["acceleration_status"]) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff9", single_workbook.views[1].id) + self.assertEqual("ENDANGERED SAFARI 2", single_workbook.views[1].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI2", single_workbook.views[1].content_url) + self.assertEqual(True, single_workbook.views[1].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Pending", single_workbook.views[1].data_acceleration_config["acceleration_status"]) + + def test_update_views_acceleration(self) -> None: + with open(POPULATE_VIEWS_XML, "rb") as f: + views_xml = f.read().decode("utf-8") + with open(UPDATE_VIEWS_ACCELERATION_STATUS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/views", text=views_xml) + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.data_acceleration_config = { + "acceleration_enabled": False, + "accelerate_now": False, + "last_updated_at": None, + "acceleration_status": None, + } + self.server.workbooks.populate_views(single_workbook) + single_workbook.views = [single_workbook.views[1], single_workbook.views[2]] + # update with parameter includeViewAccelerationStatus=True + single_workbook = self.server.workbooks.update(single_workbook, True) + + views_list = single_workbook.views + self.assertEqual("097dbe13-de89-445f-b2c3-02f28bd010c1", views_list[0].id) + self.assertEqual("GDP per capita", views_list[0].name) + self.assertEqual(False, views_list[0].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Disabled", 
views_list[0].data_acceleration_config["acceleration_status"]) + + self.assertEqual("2c1ab9d7-8d64-4cc6-b495-52e40c60c330", views_list[1].id) + self.assertEqual("Country ranks", views_list[1].name) + self.assertEqual(True, views_list[1].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Pending", views_list[1].data_acceleration_config["acceleration_status"]) + + self.assertEqual("0599c28c-6d82-457e-a453-e52c1bdb00f5", views_list[2].id) + self.assertEqual("Interest rates", views_list[2].name) + self.assertEqual(True, views_list[2].data_acceleration_config["acceleration_enabled"]) + self.assertEqual("Pending", views_list[2].data_acceleration_config["acceleration_status"]) diff --git a/test/test_virtual_connection.py b/test/test_virtual_connection.py new file mode 100644 index 000000000..975033d2d --- /dev/null +++ b/test/test_virtual_connection.py @@ -0,0 +1,242 @@ +import json +from pathlib import Path +import unittest + +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.datetime_helpers import parse_datetime +from tableauserverclient.models.virtual_connection_item import VirtualConnectionItem + +ASSET_DIR = Path(__file__).parent / "assets" + +VIRTUAL_CONNECTION_GET_XML = ASSET_DIR / "virtual_connections_get.xml" +VIRTUAL_CONNECTION_POPULATE_CONNECTIONS = ASSET_DIR / "virtual_connection_populate_connections.xml" +VC_DB_CONN_UPDATE = ASSET_DIR / "virtual_connection_database_connection_update.xml" +VIRTUAL_CONNECTION_DOWNLOAD = ASSET_DIR / "virtual_connections_download.xml" +VIRTUAL_CONNECTION_UPDATE = ASSET_DIR / "virtual_connections_update.xml" +VIRTUAL_CONNECTION_REVISIONS = ASSET_DIR / "virtual_connections_revisions.xml" +VIRTUAL_CONNECTION_PUBLISH = ASSET_DIR / "virtual_connections_publish.xml" +ADD_PERMISSIONS = ASSET_DIR / "virtual_connection_add_permissions.xml" + + +class TestVirtualConnections(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test") + + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + self.server.version = "3.23" + + self.baseurl = f"{self.server.baseurl}/sites/{self.server.site_id}/virtualConnections" + return super().setUp() + + def test_from_xml(self): + items = VirtualConnectionItem.from_response(VIRTUAL_CONNECTION_GET_XML.read_bytes(), self.server.namespace) + + assert len(items) == 1 + virtual_connection = items[0] + assert virtual_connection.created_at == parse_datetime("2024-05-30T09:00:00Z") + assert not virtual_connection.has_extracts + assert virtual_connection.id == "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + assert virtual_connection.is_certified + assert virtual_connection.name == "vconn" + assert virtual_connection.updated_at == parse_datetime("2024-06-18T09:00:00Z") + assert virtual_connection.webpage_url == "https://test/#/site/site-name/virtualconnections/3" + + def test_virtual_connection_get(self): + with requests_mock.mock() as m: + m.get(self.baseurl, text=VIRTUAL_CONNECTION_GET_XML.read_text()) + items, pagination_item = self.server.virtual_connections.get() + + assert len(items) == 1 + assert pagination_item.total_available == 1 + assert items[0].name == "vconn" + + def test_virtual_connection_populate_connections(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{vconn.id}/connections", text=VIRTUAL_CONNECTION_POPULATE_CONNECTIONS.read_text()) + vc_out = 
self.server.virtual_connections.populate_connections(vconn) + connection_list = list(vconn.connections) + + assert vc_out is vconn + assert vc_out._connections is not None + + assert len(connection_list) == 1 + connection = connection_list[0] + assert connection.id == "37ca6ced-58d7-4dcf-99dc-f0a85223cbef" + assert connection.connection_type == "postgres" + assert connection.server_address == "localhost" + assert connection.server_port == "5432" + assert connection.username == "pgadmin" + + def test_virtual_connection_update_connection_db_connection(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + connection = TSC.ConnectionItem() + connection._id = "37ca6ced-58d7-4dcf-99dc-f0a85223cbef" + connection.server_address = "localhost" + connection.server_port = "5432" + connection.username = "pgadmin" + connection.password = "password" + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{vconn.id}/connections/{connection.id}/modify", text=VC_DB_CONN_UPDATE.read_text()) + updated_connection = self.server.virtual_connections.update_connection_db_connection(vconn, connection) + + assert updated_connection.id == "37ca6ced-58d7-4dcf-99dc-f0a85223cbef" + assert updated_connection.server_address == "localhost" + assert updated_connection.server_port == "5432" + assert updated_connection.username == "pgadmin" + assert updated_connection.password is None + + def test_virtual_connection_get_by_id(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{vconn.id}", text=VIRTUAL_CONNECTION_DOWNLOAD.read_text()) + vconn = self.server.virtual_connections.get_by_id(vconn) + + assert vconn.content + assert vconn.created_at is None + assert vconn.id is None + assert "policyCollection" in vconn.content + assert "revision" in vconn.content + + def test_virtual_connection_update(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + vconn.is_certified = True + vconn.certification_note = "demo certification note" + vconn.project_id = "5286d663-8668-4ac2-8c8d-91af7d585f6b" + vconn.owner_id = "9324cf6b-ba72-4b8e-b895-ac3f28d2f0e0" + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{vconn.id}", text=VIRTUAL_CONNECTION_UPDATE.read_text()) + vconn = self.server.virtual_connections.update(vconn) + + assert not vconn.has_extracts + assert vconn.id is None + assert vconn.is_certified + assert vconn.name == "testv1" + assert vconn.certification_note == "demo certification note" + assert vconn.project_id == "5286d663-8668-4ac2-8c8d-91af7d585f6b" + assert vconn.owner_id == "9324cf6b-ba72-4b8e-b895-ac3f28d2f0e0" + + def test_virtual_connection_get_revisions(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{vconn.id}/revisions", text=VIRTUAL_CONNECTION_REVISIONS.read_text()) + revisions, pagination_item = self.server.virtual_connections.get_revisions(vconn) + + assert len(revisions) == 3 + assert pagination_item.total_available == 3 + assert revisions[0].resource_id == vconn.id + assert revisions[0].resource_name == vconn.name + assert revisions[0].created_at == parse_datetime("2016-07-26T20:34:56Z") + assert revisions[0].revision_number == "1" + assert not revisions[0].current + assert not revisions[0].deleted + assert revisions[0].user_name == "Cassie" + assert revisions[0].user_id == 
"5de011f8-5aa9-4d5b-b991-f462c8dd6bb7" + assert revisions[1].resource_id == vconn.id + assert revisions[1].resource_name == vconn.name + assert revisions[1].created_at == parse_datetime("2016-07-27T20:34:56Z") + assert revisions[1].revision_number == "2" + assert not revisions[1].current + assert not revisions[1].deleted + assert revisions[2].resource_id == vconn.id + assert revisions[2].resource_name == vconn.name + assert revisions[2].created_at == parse_datetime("2016-07-28T20:34:56Z") + assert revisions[2].revision_number == "3" + assert revisions[2].current + assert not revisions[2].deleted + assert revisions[2].user_name == "Cassie" + assert revisions[2].user_id == "5de011f8-5aa9-4d5b-b991-f462c8dd6bb7" + + def test_virtual_connection_download_revision(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{vconn.id}/revisions/1", text=VIRTUAL_CONNECTION_DOWNLOAD.read_text()) + content = self.server.virtual_connections.download_revision(vconn, 1) + + assert content + assert "policyCollection" in content + data = json.loads(content) + assert "policyCollection" in data + assert "revision" in data + + def test_virtual_connection_delete(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{vconn.id}") + self.server.virtual_connections.delete(vconn) + self.server.virtual_connections.delete(vconn.id) + + assert m.call_count == 2 + + def test_virtual_connection_publish(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + vconn.project_id = "9836791c-9468-40f0-b7f3-d10b9562a046" + vconn.owner_id = "ee8bc9ca-77fe-4ae0-8093-cf77f0ee67a9" + with requests_mock.mock() as m: + m.post(f"{self.baseurl}?overwrite=false&publishAsDraft=false", text=VIRTUAL_CONNECTION_PUBLISH.read_text()) + vconn = self.server.virtual_connections.publish( + vconn, '{"test": 0}', mode="CreateNew", publish_as_draft=False + ) + + assert vconn.name == "vconn_test" + assert vconn.owner_id == "ee8bc9ca-77fe-4ae0-8093-cf77f0ee67a9" + assert vconn.project_id == "9836791c-9468-40f0-b7f3-d10b9562a046" + assert vconn.content + assert "policyCollection" in vconn.content + assert "revision" in vconn.content + + def test_virtual_connection_publish_draft_overwrite(self): + vconn = VirtualConnectionItem("vconn") + vconn._id = "8fd7cc02-bb55-4d15-b8b1-9650239efe79" + vconn.project_id = "9836791c-9468-40f0-b7f3-d10b9562a046" + vconn.owner_id = "ee8bc9ca-77fe-4ae0-8093-cf77f0ee67a9" + with requests_mock.mock() as m: + m.post(f"{self.baseurl}?overwrite=true&publishAsDraft=true", text=VIRTUAL_CONNECTION_PUBLISH.read_text()) + vconn = self.server.virtual_connections.publish( + vconn, '{"test": 0}', mode="Overwrite", publish_as_draft=True + ) + + assert vconn.name == "vconn_test" + assert vconn.owner_id == "ee8bc9ca-77fe-4ae0-8093-cf77f0ee67a9" + assert vconn.project_id == "9836791c-9468-40f0-b7f3-d10b9562a046" + assert vconn.content + assert "policyCollection" in vconn.content + assert "revision" in vconn.content + + def test_add_permissions(self) -> None: + with open(ADD_PERMISSIONS, "rb") as f: + response_xml = f.read().decode("utf-8") + + single_virtual_connection = TSC.VirtualConnectionItem("test") + single_virtual_connection._id = "21778de4-b7b9-44bc-a599-1506a2639ace" + + bob = TSC.UserItem.as_reference("7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + group_of_people = 
TSC.GroupItem.as_reference("5e5e1978-71fa-11e4-87dd-7382f5c437af") + + new_permissions = [ + TSC.PermissionsRule(bob, {"Write": "Allow"}), + TSC.PermissionsRule(group_of_people, {"Read": "Deny"}), + ] + + with requests_mock.mock() as m: + m.put(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml) + permissions = self.server.virtual_connections.add_permissions(single_virtual_connection, new_permissions) + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "5e5e1978-71fa-11e4-87dd-7382f5c437af") + self.assertDictEqual(permissions[0].capabilities, {TSC.Permission.Capability.Read: TSC.Permission.Mode.Deny}) + + self.assertEqual(permissions[1].grantee.tag_name, "user") + self.assertEqual(permissions[1].grantee.id, "7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + self.assertDictEqual(permissions[1].capabilities, {TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow}) diff --git a/test/test_webhook.py b/test/test_webhook.py new file mode 100644 index 000000000..5f26266b2 --- /dev/null +++ b/test/test_webhook.py @@ -0,0 +1,84 @@ +import os +import unittest + +import requests_mock + +import tableauserverclient as TSC +from tableauserverclient.server import RequestFactory +from tableauserverclient.models import WebhookItem +from ._utils import asset + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +GET_XML = asset("webhook_get.xml") +CREATE_XML = asset("webhook_create.xml") +CREATE_REQUEST_XML = asset("webhook_create_request.xml") + + +class WebhookTests(unittest.TestCase): + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) + self.server.version = "3.6" + + # Fake signin + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" + + self.baseurl = self.server.webhooks.baseurl + + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + webhooks, _ = self.server.webhooks.get() + self.assertEqual(len(webhooks), 1) + webhook = webhooks[0] + + self.assertEqual(webhook.url, "url") + self.assertEqual(webhook.event, "datasource-created") + self.assertEqual(webhook.owner_id, "webhook_owner_luid") + self.assertEqual(webhook.name, "webhook-name") + self.assertEqual(webhook.id, "webhook-id") + + def test_get_before_signin(self) -> None: + self.server._auth_token = None + self.assertRaises(TSC.NotSignedInError, self.server.webhooks.get) + + def test_delete(self) -> None: + with requests_mock.mock() as m: + m.delete(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", status_code=204) + self.server.webhooks.delete("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + + def test_delete_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.webhooks.delete, "") + + def test_test(self) -> None: + with requests_mock.mock() as m: + m.get(self.baseurl + "/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760/test", status_code=200) + self.server.webhooks.test("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + + def test_create(self) -> None: + with open(CREATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + webhook_model = TSC.WebhookItem() + webhook_model.name = "Test Webhook" + webhook_model.url = "https://ifttt.com/maker-url" + webhook_model.event = "datasource-created" + + new_webhook = 
self.server.webhooks.create(webhook_model) + + self.assertNotEqual(new_webhook.id, None) + + def test_request_factory(self): + with open(CREATE_REQUEST_XML, "rb") as f: + webhook_request_expected = f.read().decode("utf-8") + + webhook_item = WebhookItem() + webhook_item._set_values("webhook-id", "webhook-name", "url", "api-event-name", None) + webhook_request_actual = "{}\n".format(RequestFactory.Webhook.create_req(webhook_item).decode("utf-8")) + self.maxDiff = None + # windows does /r/n for linebreaks, remove the extra char if it is there + self.assertEqual(webhook_request_expected.replace("\r", ""), webhook_request_actual) diff --git a/test/test_workbook.py b/test/test_workbook.py index 41bbc440c..0aa52f50d 100644 --- a/test/test_workbook.py +++ b/test/test_workbook.py @@ -1,76 +1,106 @@ -import unittest import os +import re import requests_mock -import tableauserverclient as TSC -import xml.etree.ElementTree as ET +import tempfile +import unittest +from defusedxml.ElementTree import fromstring +from io import BytesIO +from pathlib import Path + +import pytest +import tableauserverclient as TSC from tableauserverclient.datetime_helpers import format_datetime +from tableauserverclient.models import UserItem, GroupItem, PermissionsRule +from tableauserverclient.server.endpoint.exceptions import InternalServerError from tableauserverclient.server.request_factory import RequestFactory - -TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets') - -ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_add_tags.xml') -GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id.xml') -GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_empty.xml') -GET_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get.xml') -POPULATE_CONNECTIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_connections.xml') -POPULATE_PDF = os.path.join(TEST_ASSET_DIR, 'populate_pdf.pdf') -POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, 'RESTAPISample Image.png') -POPULATE_VIEWS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views.xml') -POPULATE_VIEWS_USAGE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views_usage.xml') -PUBLISH_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish.xml') -PUBLISH_ASYNC_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish_async.xml') -UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_update.xml') +from ._utils import asset + +TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets") + +ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, "workbook_add_tags.xml") +GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, "workbook_get_by_id.xml") +GET_BY_ID_XML_PERSONAL = os.path.join(TEST_ASSET_DIR, "workbook_get_by_id_personal.xml") +GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, "workbook_get_empty.xml") +GET_INVALID_DATE_XML = os.path.join(TEST_ASSET_DIR, "workbook_get_invalid_date.xml") +GET_XML = os.path.join(TEST_ASSET_DIR, "workbook_get.xml") +ODATA_XML = os.path.join(TEST_ASSET_DIR, "odata_connection.xml") +POPULATE_CONNECTIONS_XML = os.path.join(TEST_ASSET_DIR, "workbook_populate_connections.xml") +POPULATE_PDF = os.path.join(TEST_ASSET_DIR, "populate_pdf.pdf") +POPULATE_POWERPOINT = os.path.join(TEST_ASSET_DIR, "populate_powerpoint.pptx") +POPULATE_PERMISSIONS_XML = os.path.join(TEST_ASSET_DIR, "workbook_populate_permissions.xml") +POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, "RESTAPISample Image.png") +POPULATE_VIEWS_XML = os.path.join(TEST_ASSET_DIR, "workbook_populate_views.xml") +POPULATE_VIEWS_USAGE_XML = os.path.join(TEST_ASSET_DIR, 
"workbook_populate_views_usage.xml") +PUBLISH_XML = os.path.join(TEST_ASSET_DIR, "workbook_publish.xml") +PUBLISH_ASYNC_XML = os.path.join(TEST_ASSET_DIR, "workbook_publish_async.xml") +REFRESH_XML = os.path.join(TEST_ASSET_DIR, "workbook_refresh.xml") +REVISION_XML = os.path.join(TEST_ASSET_DIR, "workbook_revision.xml") +UPDATE_XML = os.path.join(TEST_ASSET_DIR, "workbook_update.xml") +UPDATE_PERMISSIONS = os.path.join(TEST_ASSET_DIR, "workbook_update_permissions.xml") class WorkbookTests(unittest.TestCase): - def setUp(self): - self.server = TSC.Server('http://test') + def setUp(self) -> None: + self.server = TSC.Server("http://test", False) # Fake sign in - self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67' - self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM' + self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67" + self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM" self.baseurl = self.server.workbooks.baseurl - def test_get(self): - with open(GET_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get(self) -> None: + with open(GET_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_workbooks, pagination_item = self.server.workbooks.get() self.assertEqual(2, pagination_item.total_available) - self.assertEqual('6d13b0ca-043d-4d42-8c9d-3f3313ea3a00', all_workbooks[0].id) - self.assertEqual('Superstore', all_workbooks[0].name) - self.assertEqual('Superstore', all_workbooks[0].content_url) + self.assertEqual("6d13b0ca-043d-4d42-8c9d-3f3313ea3a00", all_workbooks[0].id) + self.assertEqual("Superstore", all_workbooks[0].name) + self.assertEqual("Superstore", all_workbooks[0].content_url) self.assertEqual(False, all_workbooks[0].show_tabs) + self.assertEqual("http://tableauserver/#/workbooks/1/views", all_workbooks[0].webpage_url) self.assertEqual(1, all_workbooks[0].size) - self.assertEqual('2016-08-03T20:34:04Z', format_datetime(all_workbooks[0].created_at)) - self.assertEqual('2016-08-04T17:56:41Z', format_datetime(all_workbooks[0].updated_at)) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[0].project_id) - self.assertEqual('default', all_workbooks[0].project_name) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[0].owner_id) - - self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', all_workbooks[1].id) - self.assertEqual('SafariSample', all_workbooks[1].name) - self.assertEqual('SafariSample', all_workbooks[1].content_url) + self.assertEqual("2016-08-03T20:34:04Z", format_datetime(all_workbooks[0].created_at)) + self.assertEqual("description for Superstore", all_workbooks[0].description) + self.assertEqual("2016-08-04T17:56:41Z", format_datetime(all_workbooks[0].updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", all_workbooks[0].project_id) + self.assertEqual("default", all_workbooks[0].project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_workbooks[0].owner_id) + + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", all_workbooks[1].id) + self.assertEqual("SafariSample", all_workbooks[1].name) + self.assertEqual("SafariSample", all_workbooks[1].content_url) + self.assertEqual("http://tableauserver/#/workbooks/2/views", all_workbooks[1].webpage_url) self.assertEqual(False, all_workbooks[1].show_tabs) self.assertEqual(26, all_workbooks[1].size) - self.assertEqual('2016-07-26T20:34:56Z', format_datetime(all_workbooks[1].created_at)) - 
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(all_workbooks[1].updated_at)) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[1].project_id) - self.assertEqual('default', all_workbooks[1].project_name) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[1].owner_id) - self.assertEqual(set(['Safari', 'Sample']), all_workbooks[1].tags) - - def test_get_before_signin(self): + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(all_workbooks[1].created_at)) + self.assertEqual("description for SafariSample", all_workbooks[1].description) + self.assertEqual("2016-07-26T20:35:05Z", format_datetime(all_workbooks[1].updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", all_workbooks[1].project_id) + self.assertEqual("default", all_workbooks[1].project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", all_workbooks[1].owner_id) + self.assertEqual({"Safari", "Sample"}, all_workbooks[1].tags) + + def test_get_ignore_invalid_date(self) -> None: + with open(GET_INVALID_DATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl, text=response_xml) + all_workbooks, pagination_item = self.server.workbooks.get() + self.assertEqual(None, format_datetime(all_workbooks[0].created_at)) + self.assertEqual("2016-08-04T17:56:41Z", format_datetime(all_workbooks[0].updated_at)) + + def test_get_before_signin(self) -> None: self.server._auth_token = None self.assertRaises(TSC.NotSignedInError, self.server.workbooks.get) - def test_get_empty(self): - with open(GET_EMPTY_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get_empty(self) -> None: + with open(GET_EMPTY_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.get(self.baseurl, text=response_xml) all_workbooks, pagination_item = self.server.workbooks.get() @@ -78,74 +108,125 @@ def test_get_empty(self): self.assertEqual(0, pagination_item.total_available) self.assertEqual([], all_workbooks) - def test_get_by_id(self): - with open(GET_BY_ID_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_get_by_id(self) -> None: + with open(GET_BY_ID_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42", text=response_xml) + single_workbook = self.server.workbooks.get_by_id("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d42", single_workbook.id) + self.assertEqual("SafariSample", single_workbook.name) + self.assertEqual("SafariSample", single_workbook.content_url) + self.assertEqual("http://tableauserver/#/workbooks/2/views", single_workbook.webpage_url) + self.assertEqual(False, single_workbook.show_tabs) + self.assertEqual(26, single_workbook.size) + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(single_workbook.created_at)) + self.assertEqual("description for SafariSample", single_workbook.description) + self.assertEqual("2016-07-26T20:35:05Z", format_datetime(single_workbook.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", single_workbook.project_id) + self.assertEqual("default", single_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", single_workbook.owner_id) + self.assertEqual({"Safari", "Sample"}, single_workbook.tags) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", single_workbook.views[0].id) + 
self.assertEqual("ENDANGERED SAFARI", single_workbook.views[0].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", single_workbook.views[0].content_url) + + def test_get_by_id_personal(self) -> None: + # workbooks in personal space don't have project_id or project_name + with open(GET_BY_ID_XML_PERSONAL, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', text=response_xml) - single_workbook = self.server.workbooks.get_by_id('3cc6cd06-89ce-4fdc-b935-5294135d6d42') + m.get(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d43", text=response_xml) + single_workbook = self.server.workbooks.get_by_id("3cc6cd06-89ce-4fdc-b935-5294135d6d43") - self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', single_workbook.id) - self.assertEqual('SafariSample', single_workbook.name) - self.assertEqual('SafariSample', single_workbook.content_url) + self.assertEqual("3cc6cd06-89ce-4fdc-b935-5294135d6d43", single_workbook.id) + self.assertEqual("SafariSample", single_workbook.name) + self.assertEqual("SafariSample", single_workbook.content_url) + self.assertEqual("http://tableauserver/#/workbooks/2/views", single_workbook.webpage_url) self.assertEqual(False, single_workbook.show_tabs) self.assertEqual(26, single_workbook.size) - self.assertEqual('2016-07-26T20:34:56Z', format_datetime(single_workbook.created_at)) - self.assertEqual('2016-07-26T20:35:05Z', format_datetime(single_workbook.updated_at)) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', single_workbook.project_id) - self.assertEqual('default', single_workbook.project_name) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_workbook.owner_id) - self.assertEqual(set(['Safari', 'Sample']), single_workbook.tags) - self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', single_workbook.views[0].id) - self.assertEqual('ENDANGERED SAFARI', single_workbook.views[0].name) - self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', single_workbook.views[0].content_url) - - def test_get_by_id_missing_id(self): - self.assertRaises(ValueError, self.server.workbooks.get_by_id, '') - - def test_delete(self): - with requests_mock.mock() as m: - m.delete(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', status_code=204) - self.server.workbooks.delete('3cc6cd06-89ce-4fdc-b935-5294135d6d42') - - def test_delete_missing_id(self): - self.assertRaises(ValueError, self.server.workbooks.delete, '') - - def test_update(self): - with open(UPDATE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') - with requests_mock.mock() as m: - m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=response_xml) - single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74', show_tabs=True) - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' - single_workbook.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794' - single_workbook.name = 'renamedWorkbook' - single_workbook.materialized_views_config = {'materialized_views_enabled': True, - 'run_materialization_now': False} + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(single_workbook.created_at)) + self.assertEqual("description for SafariSample", single_workbook.description) + self.assertEqual("2016-07-26T20:35:05Z", format_datetime(single_workbook.updated_at)) + self.assertTrue(single_workbook.project_id) + self.assertIsNone(single_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", single_workbook.owner_id) + 
self.assertEqual({"Safari", "Sample"}, single_workbook.tags) + self.assertEqual("d79634e1-6063-4ec9-95ff-50acbf609ff5", single_workbook.views[0].id) + self.assertEqual("ENDANGERED SAFARI", single_workbook.views[0].name) + self.assertEqual("SafariSample/sheets/ENDANGEREDSAFARI", single_workbook.views[0].content_url) + + def test_get_by_id_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.workbooks.get_by_id, "") + + def test_refresh_id(self) -> None: + self.server.version = "2.8" + self.baseurl = self.server.workbooks.baseurl + with open(REFRESH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/refresh", status_code=202, text=response_xml) + self.server.workbooks.refresh("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_refresh_object(self) -> None: + self.server.version = "2.8" + self.baseurl = self.server.workbooks.baseurl + workbook = TSC.WorkbookItem("") + workbook._id = "3cc6cd06-89ce-4fdc-b935-5294135d6d42" + with open(REFRESH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/refresh", status_code=202, text=response_xml) + self.server.workbooks.refresh(workbook) + + def test_delete(self) -> None: + with requests_mock.mock() as m: + m.delete(self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42", status_code=204) + self.server.workbooks.delete("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_delete_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.workbooks.delete, "") + + def test_update(self) -> None: + with open(UPDATE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=response_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74", show_tabs=True) + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794" + single_workbook.name = "renamedWorkbook" + single_workbook.data_acceleration_config = { + "acceleration_enabled": True, + "accelerate_now": False, + "last_updated_at": None, + "acceleration_status": None, + } single_workbook = self.server.workbooks.update(single_workbook) - self.assertEqual('1f951daf-4061-451a-9df1-69a8062664f2', single_workbook.id) + self.assertEqual("1f951daf-4061-451a-9df1-69a8062664f2", single_workbook.id) self.assertEqual(True, single_workbook.show_tabs) - self.assertEqual('1d0304cd-3796-429f-b815-7258370b9b74', single_workbook.project_id) - self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', single_workbook.owner_id) - self.assertEqual('renamedWorkbook', single_workbook.name) - self.assertEqual(True, single_workbook.materialized_views_config['materialized_views_enabled']) - self.assertEqual(False, single_workbook.materialized_views_config['run_materialization_now']) - - def test_update_missing_id(self): - single_workbook = TSC.WorkbookItem('test') + self.assertEqual("1d0304cd-3796-429f-b815-7258370b9b74", single_workbook.project_id) + self.assertEqual("dd2239f6-ddf1-4107-981a-4cf94e415794", single_workbook.owner_id) + self.assertEqual("renamedWorkbook", single_workbook.name) + self.assertEqual(True, single_workbook.data_acceleration_config["acceleration_enabled"]) + self.assertEqual(False, single_workbook.data_acceleration_config["accelerate_now"]) + + def test_update_missing_id(self) -> 
None: + single_workbook = TSC.WorkbookItem("test") self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.update, single_workbook) - def test_update_copy_fields(self): - with open(POPULATE_CONNECTIONS_XML, 'rb') as f: - connection_xml = f.read().decode('utf-8') - with open(UPDATE_XML, 'rb') as f: - update_xml = f.read().decode('utf-8') + def test_update_copy_fields(self) -> None: + with open(POPULATE_CONNECTIONS_XML, "rb") as f: + connection_xml = f.read().decode("utf-8") + with open(UPDATE_XML, "rb") as f: + update_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=connection_xml) - m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml) - single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' + m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/connections", text=connection_xml) + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=update_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" self.server.workbooks.populate_connections(single_workbook) updated_workbook = self.server.workbooks.update(single_workbook) @@ -155,135 +236,212 @@ def test_update_copy_fields(self): self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags) self.assertEqual(single_workbook._preview_image, updated_workbook._preview_image) - def test_update_tags(self): - with open(ADD_TAGS_XML, 'rb') as f: - add_tags_xml = f.read().decode('utf-8') - with open(UPDATE_XML, 'rb') as f: - update_xml = f.read().decode('utf-8') - with requests_mock.mock() as m: - m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags', text=add_tags_xml) - m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/b', status_code=204) - m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/d', status_code=204) - m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml) - single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' - single_workbook._initial_tags.update(['a', 'b', 'c', 'd']) - single_workbook.tags.update(['a', 'c', 'e']) + def test_update_tags(self) -> None: + with open(ADD_TAGS_XML, "rb") as f: + add_tags_xml = f.read().decode("utf-8") + with open(UPDATE_XML, "rb") as f: + update_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/tags", text=add_tags_xml) + m.delete(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/tags/b", status_code=204) + m.delete(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/tags/d", status_code=204) + m.put(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2", text=update_xml) + single_workbook = TSC.WorkbookItem("1d0304cd-3796-429f-b815-7258370b9b74") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + single_workbook._initial_tags.update(["a", "b", "c", "d"]) + single_workbook.tags.update(["a", "c", "e"]) updated_workbook = self.server.workbooks.update(single_workbook) self.assertEqual(single_workbook.tags, updated_workbook.tags) self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags) - def test_download(self): + def test_download(self) -> None: with requests_mock.mock() as m: - m.get(self.baseurl + 
'/1f951daf-4061-451a-9df1-69a8062664f2/content', - headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'}) - file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2') + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/content", + headers={"Content-Disposition": 'name="tableau_workbook"; filename="RESTAPISample.twbx"'}, + ) + file_path = self.server.workbooks.download("1f951daf-4061-451a-9df1-69a8062664f2") self.assertTrue(os.path.exists(file_path)) os.remove(file_path) - def test_download_sanitizes_name(self): + def test_download_object(self) -> None: + with BytesIO() as file_object: + with requests_mock.mock() as m: + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/content", + headers={"Content-Disposition": 'name="tableau_workbook"; filename="RESTAPISample.twbx"'}, + ) + file_path = self.server.workbooks.download("1f951daf-4061-451a-9df1-69a8062664f2", filepath=file_object) + self.assertTrue(isinstance(file_path, BytesIO)) + + def test_download_sanitizes_name(self) -> None: filename = "Name,With,Commas.twbx" - disposition = 'name="tableau_workbook"; filename="{}"'.format(filename) + disposition = f'name="tableau_workbook"; filename="{filename}"' with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content', - headers={'Content-Disposition': disposition}) - file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2') + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/content", + headers={"Content-Disposition": disposition}, + ) + file_path = self.server.workbooks.download("1f951daf-4061-451a-9df1-69a8062664f2") self.assertEqual(os.path.basename(file_path), "NameWithCommas.twbx") self.assertTrue(os.path.exists(file_path)) os.remove(file_path) - def test_download_extract_only(self): + def test_download_extract_only(self) -> None: # Pretend we're 2.5 for 'extract_only' self.server.version = "2.5" self.baseurl = self.server.workbooks.baseurl with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content?includeExtract=False', - headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'}, - complete_qs=True) + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/content?includeExtract=False", + headers={"Content-Disposition": 'name="tableau_workbook"; filename="RESTAPISample.twbx"'}, + complete_qs=True, + ) # Technically this shouldn't download a twbx, but we are interested in the qs, not the file - file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2', include_extract=False) + file_path = self.server.workbooks.download("1f951daf-4061-451a-9df1-69a8062664f2", include_extract=False) self.assertTrue(os.path.exists(file_path)) os.remove(file_path) - def test_download_missing_id(self): - self.assertRaises(ValueError, self.server.workbooks.download, '') + def test_download_missing_id(self) -> None: + self.assertRaises(ValueError, self.server.workbooks.download, "") - def test_populate_views(self): - with open(POPULATE_VIEWS_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def test_populate_views(self) -> None: + with open(POPULATE_VIEWS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views', text=response_xml) - single_workbook = TSC.WorkbookItem('test') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' 
+ m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/views", text=response_xml) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" self.server.workbooks.populate_views(single_workbook) views_list = single_workbook.views - self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id) - self.assertEqual('GDP per capita', views_list[0].name) - self.assertEqual('RESTAPISample/sheets/GDPpercapita', views_list[0].content_url) - - self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id) - self.assertEqual('Country ranks', views_list[1].name) - self.assertEqual('RESTAPISample/sheets/Countryranks', views_list[1].content_url) - - self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id) - self.assertEqual('Interest rates', views_list[2].name) - self.assertEqual('RESTAPISample/sheets/Interestrates', views_list[2].content_url) - - def test_populate_views_with_usage(self): - with open(POPULATE_VIEWS_USAGE_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') - with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views?includeUsageStatistics=true', - text=response_xml) - single_workbook = TSC.WorkbookItem('test') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' + self.assertEqual("097dbe13-de89-445f-b2c3-02f28bd010c1", views_list[0].id) + self.assertEqual("GDP per capita", views_list[0].name) + self.assertEqual("RESTAPISample/sheets/GDPpercapita", views_list[0].content_url) + + self.assertEqual("2c1ab9d7-8d64-4cc6-b495-52e40c60c330", views_list[1].id) + self.assertEqual("Country ranks", views_list[1].name) + self.assertEqual("RESTAPISample/sheets/Countryranks", views_list[1].content_url) + + self.assertEqual("0599c28c-6d82-457e-a453-e52c1bdb00f5", views_list[2].id) + self.assertEqual("Interest rates", views_list[2].name) + self.assertEqual("RESTAPISample/sheets/Interestrates", views_list[2].content_url) + + def test_populate_views_with_usage(self) -> None: + with open(POPULATE_VIEWS_USAGE_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/views?includeUsageStatistics=true", + text=response_xml, + ) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" self.server.workbooks.populate_views(single_workbook, usage=True) views_list = single_workbook.views - self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id) + self.assertEqual("097dbe13-de89-445f-b2c3-02f28bd010c1", views_list[0].id) self.assertEqual(2, views_list[0].total_views) - self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id) + self.assertEqual("2c1ab9d7-8d64-4cc6-b495-52e40c60c330", views_list[1].id) self.assertEqual(37, views_list[1].total_views) - self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id) + self.assertEqual("0599c28c-6d82-457e-a453-e52c1bdb00f5", views_list[2].id) self.assertEqual(0, views_list[2].total_views) - def test_populate_views_missing_id(self): - single_workbook = TSC.WorkbookItem('test') + def test_populate_views_missing_id(self) -> None: + single_workbook = TSC.WorkbookItem("test") self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.populate_views, single_workbook) - def test_populate_connections(self): - with open(POPULATE_CONNECTIONS_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + def 
test_populate_connections(self) -> None: + with open(POPULATE_CONNECTIONS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=response_xml) - single_workbook = TSC.WorkbookItem('test') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' + m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/connections", text=response_xml) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" self.server.workbooks.populate_connections(single_workbook) - self.assertEqual('37ca6ced-58d7-4dcf-99dc-f0a85223cbef', single_workbook.connections[0].id) - self.assertEqual('dataengine', single_workbook.connections[0].connection_type) - self.assertEqual('4506225a-0d32-4ab1-82d3-c24e85f7afba', single_workbook.connections[0].datasource_id) - self.assertEqual('World Indicators', single_workbook.connections[0].datasource_name) + self.assertEqual("37ca6ced-58d7-4dcf-99dc-f0a85223cbef", single_workbook.connections[0].id) + self.assertEqual("dataengine", single_workbook.connections[0].connection_type) + self.assertEqual("4506225a-0d32-4ab1-82d3-c24e85f7afba", single_workbook.connections[0].datasource_id) + self.assertEqual("World Indicators", single_workbook.connections[0].datasource_name) + + def test_populate_permissions(self) -> None: + with open(POPULATE_PERMISSIONS_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "21778de4-b7b9-44bc-a599-1506a2639ace" + + self.server.workbooks.populate_permissions(single_workbook) + permissions = single_workbook.permissions + + self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "5e5e1978-71fa-11e4-87dd-7382f5c437af") + self.assertDictEqual( + permissions[0].capabilities, + { + TSC.Permission.Capability.WebAuthoring: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.Filter: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow, + }, + ) + + self.assertEqual(permissions[1].grantee.tag_name, "user") + self.assertEqual(permissions[1].grantee.id, "7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + self.assertDictEqual( + permissions[1].capabilities, + { + TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ShareView: TSC.Permission.Mode.Allow, + TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Deny, + TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Deny, + }, + ) + + def test_add_permissions(self) -> None: + with open(UPDATE_PERMISSIONS, "rb") as f: + response_xml = f.read().decode("utf-8") + + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "21778de4-b7b9-44bc-a599-1506a2639ace" + + bob = UserItem.as_reference("7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + group_of_people = GroupItem.as_reference("5e5e1978-71fa-11e4-87dd-7382f5c437af") + + new_permissions = [PermissionsRule(bob, {"Write": "Allow"}), PermissionsRule(group_of_people, {"Read": "Deny"})] + + with requests_mock.mock() as m: + m.put(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml) + permissions = self.server.workbooks.update_permissions(single_workbook, new_permissions) + + 
self.assertEqual(permissions[0].grantee.tag_name, "group") + self.assertEqual(permissions[0].grantee.id, "5e5e1978-71fa-11e4-87dd-7382f5c437af") + self.assertDictEqual(permissions[0].capabilities, {TSC.Permission.Capability.Read: TSC.Permission.Mode.Deny}) - def test_populate_connections_missing_id(self): - single_workbook = TSC.WorkbookItem('test') - self.assertRaises(TSC.MissingRequiredFieldError, - self.server.workbooks.populate_connections, - single_workbook) + self.assertEqual(permissions[1].grantee.tag_name, "user") + self.assertEqual(permissions[1].grantee.id, "7c37ee24-c4b1-42b6-a154-eaeab7ee330a") + self.assertDictEqual(permissions[1].capabilities, {TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow}) - def test_populate_pdf(self): + def test_populate_connections_missing_id(self) -> None: + single_workbook = TSC.WorkbookItem("test") + self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.populate_connections, single_workbook) + + def test_populate_pdf(self) -> None: self.server.version = "3.4" self.baseurl = self.server.workbooks.baseurl with open(POPULATE_PDF, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/pdf?type=a5&orientation=landscape", - content=response) - single_workbook = TSC.WorkbookItem('test') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/pdf?type=a5&orientation=landscape", + content=response, + ) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" type = TSC.PDFRequestOptions.PageType.A5 orientation = TSC.PDFRequestOptions.Orientation.Landscape @@ -292,132 +450,486 @@ def test_populate_pdf(self): self.server.workbooks.populate_pdf(single_workbook, req_option) self.assertEqual(response, single_workbook.pdf) - def test_populate_preview_image(self): - with open(POPULATE_PREVIEW_IMAGE, 'rb') as f: + def test_populate_powerpoint(self) -> None: + self.server.version = "3.8" + self.baseurl = self.server.workbooks.baseurl + with open(POPULATE_POWERPOINT, "rb") as f: + response = f.read() + with requests_mock.mock() as m: + m.get( + self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/powerpoint", + content=response, + ) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" + + self.server.workbooks.populate_powerpoint(single_workbook) + self.assertEqual(response, single_workbook.powerpoint) + + def test_populate_preview_image(self) -> None: + with open(POPULATE_PREVIEW_IMAGE, "rb") as f: response = f.read() with requests_mock.mock() as m: - m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/previewImage', content=response) - single_workbook = TSC.WorkbookItem('test') - single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2' + m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/previewImage", content=response) + single_workbook = TSC.WorkbookItem("test") + single_workbook._id = "1f951daf-4061-451a-9df1-69a8062664f2" self.server.workbooks.populate_preview_image(single_workbook) self.assertEqual(response, single_workbook.preview_image) - def test_populate_preview_image_missing_id(self): - single_workbook = TSC.WorkbookItem('test') - self.assertRaises(TSC.MissingRequiredFieldError, - self.server.workbooks.populate_preview_image, - single_workbook) + def test_populate_preview_image_missing_id(self) -> None: + single_workbook = TSC.WorkbookItem("test") + 
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.populate_preview_image, single_workbook) + + def test_publish(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) + + new_workbook.description = "REST API Testing" + + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") + publish_mode = self.server.PublishMode.CreateNew + + new_workbook = self.server.workbooks.publish(new_workbook, sample_workbook, publish_mode) + + self.assertEqual("a8076ca1-e9d8-495e-bae6-c684dbb55836", new_workbook.id) + self.assertEqual("RESTAPISample", new_workbook.name) + self.assertEqual("RESTAPISample_0", new_workbook.content_url) + self.assertEqual(False, new_workbook.show_tabs) + self.assertEqual(1, new_workbook.size) + self.assertEqual("2016-08-18T18:33:24Z", format_datetime(new_workbook.created_at)) + self.assertEqual("2016-08-18T20:31:34Z", format_datetime(new_workbook.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_workbook.project_id) + self.assertEqual("default", new_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_workbook.owner_id) + self.assertEqual("fe0b4e89-73f4-435e-952d-3a263fbfa56c", new_workbook.views[0].id) + self.assertEqual("GDP per capita", new_workbook.views[0].name) + self.assertEqual("RESTAPISample_0/sheets/GDPpercapita", new_workbook.views[0].content_url) + self.assertEqual("REST API Testing", new_workbook.description) + + def test_publish_a_packaged_file_object(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) + + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") + + with open(sample_workbook, "rb") as fp: + publish_mode = self.server.PublishMode.CreateNew + + new_workbook = self.server.workbooks.publish(new_workbook, fp, publish_mode) + + self.assertEqual("a8076ca1-e9d8-495e-bae6-c684dbb55836", new_workbook.id) + self.assertEqual("RESTAPISample", new_workbook.name) + self.assertEqual("RESTAPISample_0", new_workbook.content_url) + self.assertEqual(False, new_workbook.show_tabs) + self.assertEqual(1, new_workbook.size) + self.assertEqual("2016-08-18T18:33:24Z", format_datetime(new_workbook.created_at)) + self.assertEqual("2016-08-18T20:31:34Z", format_datetime(new_workbook.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_workbook.project_id) + self.assertEqual("default", new_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_workbook.owner_id) + self.assertEqual("fe0b4e89-73f4-435e-952d-3a263fbfa56c", new_workbook.views[0].id) + self.assertEqual("GDP per capita", new_workbook.views[0].name) + self.assertEqual("RESTAPISample_0/sheets/GDPpercapita", new_workbook.views[0].content_url) + + def test_publish_non_packeged_file_object(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) - def test_publish(self): 
- with open(PUBLISH_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + sample_workbook = os.path.join(TEST_ASSET_DIR, "RESTAPISample.twb") + + with open(sample_workbook, "rb") as fp: + publish_mode = self.server.PublishMode.CreateNew + + new_workbook = self.server.workbooks.publish(new_workbook, fp, publish_mode) + + self.assertEqual("a8076ca1-e9d8-495e-bae6-c684dbb55836", new_workbook.id) + self.assertEqual("RESTAPISample", new_workbook.name) + self.assertEqual("RESTAPISample_0", new_workbook.content_url) + self.assertEqual(False, new_workbook.show_tabs) + self.assertEqual(1, new_workbook.size) + self.assertEqual("2016-08-18T18:33:24Z", format_datetime(new_workbook.created_at)) + self.assertEqual("2016-08-18T20:31:34Z", format_datetime(new_workbook.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_workbook.project_id) + self.assertEqual("default", new_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_workbook.owner_id) + self.assertEqual("fe0b4e89-73f4-435e-952d-3a263fbfa56c", new_workbook.views[0].id) + self.assertEqual("GDP per capita", new_workbook.views[0].name) + self.assertEqual("RESTAPISample_0/sheets/GDPpercapita", new_workbook.views[0].content_url) + + def test_publish_path_object(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - new_workbook = TSC.WorkbookItem(name='Sample', - show_tabs=False, - project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) - sample_workbok = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx') + sample_workbook = Path(TEST_ASSET_DIR) / "SampleWB.twbx" publish_mode = self.server.PublishMode.CreateNew - new_workbook = self.server.workbooks.publish(new_workbook, - sample_workbok, - publish_mode) + new_workbook = self.server.workbooks.publish(new_workbook, sample_workbook, publish_mode) - self.assertEqual('a8076ca1-e9d8-495e-bae6-c684dbb55836', new_workbook.id) - self.assertEqual('RESTAPISample', new_workbook.name) - self.assertEqual('RESTAPISample_0', new_workbook.content_url) + self.assertEqual("a8076ca1-e9d8-495e-bae6-c684dbb55836", new_workbook.id) + self.assertEqual("RESTAPISample", new_workbook.name) + self.assertEqual("RESTAPISample_0", new_workbook.content_url) self.assertEqual(False, new_workbook.show_tabs) self.assertEqual(1, new_workbook.size) - self.assertEqual('2016-08-18T18:33:24Z', format_datetime(new_workbook.created_at)) - self.assertEqual('2016-08-18T20:31:34Z', format_datetime(new_workbook.updated_at)) - self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', new_workbook.project_id) - self.assertEqual('default', new_workbook.project_name) - self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', new_workbook.owner_id) - self.assertEqual('fe0b4e89-73f4-435e-952d-3a263fbfa56c', new_workbook.views[0].id) - self.assertEqual('GDP per capita', new_workbook.views[0].name) - self.assertEqual('RESTAPISample_0/sheets/GDPpercapita', new_workbook.views[0].content_url) - - def test_publish_async(self): - with open(PUBLISH_ASYNC_XML, 'rb') as f: - response_xml = f.read().decode('utf-8') + self.assertEqual("2016-08-18T18:33:24Z", format_datetime(new_workbook.created_at)) + self.assertEqual("2016-08-18T20:31:34Z", format_datetime(new_workbook.updated_at)) + self.assertEqual("ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", new_workbook.project_id) + 
self.assertEqual("default", new_workbook.project_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", new_workbook.owner_id) + self.assertEqual("fe0b4e89-73f4-435e-952d-3a263fbfa56c", new_workbook.views[0].id) + self.assertEqual("GDP per capita", new_workbook.views[0].name) + self.assertEqual("RESTAPISample_0/sheets/GDPpercapita", new_workbook.views[0].content_url) + + def test_publish_with_hidden_views_on_workbook(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) + + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") + publish_mode = self.server.PublishMode.CreateNew + + new_workbook.hidden_views = ["GDP per capita"] + new_workbook = self.server.workbooks.publish(new_workbook, sample_workbook, publish_mode) + request_body = m._adapter.request_history[0]._request.body + # order of attributes in xml is unspecified + self.assertTrue(re.search(rb"<\/views>", request_body)) + self.assertTrue(re.search(rb"<\/views>", request_body)) + + def test_publish_with_thumbnails_user_id(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") with requests_mock.mock() as m: m.post(self.baseurl, text=response_xml) - new_workbook = TSC.WorkbookItem(name='Sample', - show_tabs=False, - project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + new_workbook = TSC.WorkbookItem( + name="Sample", + show_tabs=False, + project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", + thumbnails_user_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20761", + ) - sample_workbok = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx') + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") publish_mode = self.server.PublishMode.CreateNew + new_workbook = self.server.workbooks.publish(new_workbook, sample_workbook, publish_mode) + request_body = m._adapter.request_history[0]._request.body + # order of attributes in xml is unspecified + self.assertTrue(re.search(rb"thumbnailsUserId=\"ee8c6e70-43b6-11e6-af4f-f7b0d8e20761\"", request_body)) + + def test_publish_with_thumbnails_group_id(self) -> None: + with open(PUBLISH_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) - new_job = self.server.workbooks.publish(new_workbook, - sample_workbok, - publish_mode, - as_job=True) - - self.assertEqual('7c3d599e-949f-44c3-94a1-f30ba85757e4', new_job.id) - self.assertEqual('PublishWorkbook', new_job.type) - self.assertEqual('0', new_job.progress) - self.assertEqual('2018-06-29T23:22:32Z', format_datetime(new_job.created_at)) - self.assertEqual('1', new_job.finish_code) - - def test_publish_invalid_file(self): - new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - self.assertRaises(IOError, self.server.workbooks.publish, new_workbook, '.', - self.server.PublishMode.CreateNew) - - def test_publish_invalid_file_type(self): - new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - self.assertRaises(ValueError, self.server.workbooks.publish, - new_workbook, os.path.join(TEST_ASSET_DIR, 'SampleDS.tds'), - self.server.PublishMode.CreateNew) - - def test_publish_multi_connection(self): - new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False, - project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + new_workbook = 
TSC.WorkbookItem( + name="Sample", + show_tabs=False, + project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760", + thumbnails_group_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20762", + ) + + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") + publish_mode = self.server.PublishMode.CreateNew + new_workbook = self.server.workbooks.publish(new_workbook, sample_workbook, publish_mode) + request_body = m._adapter.request_history[0]._request.body + self.assertTrue(re.search(rb"thumbnailsGroupId=\"ee8c6e70-43b6-11e6-af4f-f7b0d8e20762\"", request_body)) + + @pytest.mark.filterwarnings("ignore:'as_job' not available") + def test_publish_with_query_params(self) -> None: + with open(PUBLISH_ASYNC_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(self.baseurl, text=response_xml) + + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) + + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") + publish_mode = self.server.PublishMode.CreateNew + + self.server.workbooks.publish( + new_workbook, sample_workbook, publish_mode, as_job=True, skip_connection_check=True + ) + + request_query_params = m._adapter.request_history[0].qs + self.assertTrue("asjob" in request_query_params) + self.assertTrue(request_query_params["asjob"]) + self.assertTrue("skipconnectioncheck" in request_query_params) + self.assertTrue(request_query_params["skipconnectioncheck"]) + + def test_publish_async(self) -> None: + self.server.version = "3.0" + baseurl = self.server.workbooks.baseurl + with open(PUBLISH_ASYNC_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post(baseurl, text=response_xml) + + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) + + sample_workbook = os.path.join(TEST_ASSET_DIR, "SampleWB.twbx") + publish_mode = self.server.PublishMode.CreateNew + + new_job = self.server.workbooks.publish(new_workbook, sample_workbook, publish_mode, as_job=True) + + self.assertEqual("7c3d599e-949f-44c3-94a1-f30ba85757e4", new_job.id) + self.assertEqual("PublishWorkbook", new_job.type) + self.assertEqual("0", new_job.progress) + self.assertEqual("2018-06-29T23:22:32Z", format_datetime(new_job.created_at)) + self.assertEqual(1, new_job.finish_code) + + def test_publish_invalid_file(self) -> None: + new_workbook = TSC.WorkbookItem("test", "ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + self.assertRaises(IOError, self.server.workbooks.publish, new_workbook, ".", self.server.PublishMode.CreateNew) + + def test_publish_invalid_file_type(self) -> None: + new_workbook = TSC.WorkbookItem("test", "ee8c6e70-43b6-11e6-af4f-f7b0d8e20760") + self.assertRaises( + ValueError, + self.server.workbooks.publish, + new_workbook, + os.path.join(TEST_ASSET_DIR, "SampleDS.tds"), + self.server.PublishMode.CreateNew, + ) + + def test_publish_unnamed_file_object(self) -> None: + new_workbook = TSC.WorkbookItem("test") + + with open(os.path.join(TEST_ASSET_DIR, "SampleWB.twbx"), "rb") as f: + self.assertRaises( + ValueError, self.server.workbooks.publish, new_workbook, f, self.server.PublishMode.CreateNew + ) + + def test_publish_non_bytes_file_object(self) -> None: + new_workbook = TSC.WorkbookItem("test") + + with open(os.path.join(TEST_ASSET_DIR, "SampleWB.twbx")) as f: + self.assertRaises( + TypeError, self.server.workbooks.publish, new_workbook, f, self.server.PublishMode.CreateNew + ) + + def 
test_publish_file_object_of_unknown_type_raises_exception(self) -> None: + new_workbook = TSC.WorkbookItem("test") + with BytesIO() as file_object: + file_object.write(bytes.fromhex("89504E470D0A1A0A")) + file_object.seek(0) + self.assertRaises( + ValueError, self.server.workbooks.publish, new_workbook, file_object, self.server.PublishMode.CreateNew + ) + + def test_publish_multi_connection(self) -> None: + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) connection1 = TSC.ConnectionItem() - connection1.server_address = 'mysql.test.com' - connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True) + connection1.server_address = "mysql.test.com" + connection1.connection_credentials = TSC.ConnectionCredentials("test", "secret", True) connection2 = TSC.ConnectionItem() - connection2.server_address = 'pgsql.test.com' - connection2.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True) + connection2.server_address = "pgsql.test.com" + connection2.connection_credentials = TSC.ConnectionCredentials("test", "secret", True) response = RequestFactory.Workbook._generate_xml(new_workbook, connections=[connection1, connection2]) # Can't use ConnectionItem parser due to xml namespace problems - connection_results = ET.fromstring(response).findall('.//connection') + connection_results = fromstring(response).findall(".//connection") - self.assertEqual(connection_results[0].get('serverAddress', None), 'mysql.test.com') - self.assertEqual(connection_results[0].find('connectionCredentials').get('name', None), 'test') - self.assertEqual(connection_results[1].get('serverAddress', None), 'pgsql.test.com') - self.assertEqual(connection_results[1].find('connectionCredentials').get('password', None), 'secret') + self.assertEqual(connection_results[0].get("serverAddress", None), "mysql.test.com") + self.assertEqual(connection_results[0].find("connectionCredentials").get("name", None), "test") # type: ignore[union-attr] + self.assertEqual(connection_results[1].get("serverAddress", None), "pgsql.test.com") + self.assertEqual(connection_results[1].find("connectionCredentials").get("password", None), "secret") # type: ignore[union-attr] - def test_publish_single_connection(self): - new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False, - project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') - connection_creds = TSC.ConnectionCredentials('test', 'secret', True) + def test_publish_multi_connection_flat(self) -> None: + new_workbook = TSC.WorkbookItem( + name="Sample", show_tabs=False, project_id="ee8c6e70-43b6-11e6-af4f-f7b0d8e20760" + ) + connection1 = TSC.ConnectionItem() + connection1.server_address = "mysql.test.com" + connection1.username = "test" + connection1.password = "secret" + connection1.embed_password = True + connection2 = TSC.ConnectionItem() + connection2.server_address = "pgsql.test.com" + connection2.username = "test" + connection2.password = "secret" + connection2.embed_password = True - response = RequestFactory.Workbook._generate_xml(new_workbook, connection_credentials=connection_creds) + response = RequestFactory.Workbook._generate_xml(new_workbook, connections=[connection1, connection2]) # Can't use ConnectionItem parser due to xml namespace problems - credentials = ET.fromstring(response).findall('.//connectionCredentials') - self.assertEqual(len(credentials), 1) - self.assertEqual(credentials[0].get('name', None), 'test') - self.assertEqual(credentials[0].get('password', 
None), 'secret') - self.assertEqual(credentials[0].get('embed', None), 'true') + connection_results = fromstring(response).findall(".//connection") + + self.assertEqual(connection_results[0].get("serverAddress", None), "mysql.test.com") + self.assertEqual(connection_results[0].find("connectionCredentials").get("name", None), "test") # type: ignore[union-attr] + self.assertEqual(connection_results[1].get("serverAddress", None), "pgsql.test.com") + self.assertEqual(connection_results[1].find("connectionCredentials").get("password", None), "secret") # type: ignore[union-attr] - def test_credentials_and_multi_connect_raises_exception(self): - new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False, - project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760') + def test_synchronous_publish_timeout_error(self) -> None: + with requests_mock.mock() as m: + m.register_uri("POST", self.baseurl, status_code=504) - connection_creds = TSC.ConnectionCredentials('test', 'secret', True) + new_workbook = TSC.WorkbookItem(project_id="") + publish_mode = self.server.PublishMode.CreateNew - connection1 = TSC.ConnectionItem() - connection1.server_address = 'mysql.test.com' - connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True) + self.assertRaisesRegex( + InternalServerError, + "Please use asynchronous publishing to avoid timeouts", + self.server.workbooks.publish, + new_workbook, + asset("SampleWB.twbx"), + publish_mode, + ) + + def test_delete_extracts_all(self) -> None: + self.server.version = "3.10" + self.baseurl = self.server.workbooks.baseurl + + with open(PUBLISH_ASYNC_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post( + self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/deleteExtract", status_code=200, text=response_xml + ) + self.server.workbooks.delete_extract("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_create_extracts_all(self) -> None: + self.server.version = "3.10" + self.baseurl = self.server.workbooks.baseurl + + with open(PUBLISH_ASYNC_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post( + self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/createExtract", status_code=200, text=response_xml + ) + self.server.workbooks.create_extract("3cc6cd06-89ce-4fdc-b935-5294135d6d42") + + def test_create_extracts_one(self) -> None: + self.server.version = "3.10" + self.baseurl = self.server.workbooks.baseurl + + datasource = TSC.DatasourceItem("test") + datasource._id = "1f951daf-4061-451a-9df1-69a8062664f2" + + with open(PUBLISH_ASYNC_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.post( + self.baseurl + "/3cc6cd06-89ce-4fdc-b935-5294135d6d42/createExtract", status_code=200, text=response_xml + ) + self.server.workbooks.create_extract("3cc6cd06-89ce-4fdc-b935-5294135d6d42", False, datasource) + + def test_revisions(self) -> None: + self.baseurl = self.server.workbooks.baseurl + workbook = TSC.WorkbookItem("project", "test") + workbook._id = "06b944d2-959d-4604-9305-12323c95e70e" + + with open(REVISION_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + with requests_mock.mock() as m: + m.get(f"{self.baseurl}/{workbook.id}/revisions", text=response_xml) + self.server.workbooks.populate_revisions(workbook) + revisions = workbook.revisions + + self.assertEqual(len(revisions), 3) + self.assertEqual("2016-07-26T20:34:56Z", format_datetime(revisions[0].created_at)) + self.assertEqual("2016-07-27T20:34:56Z", 
format_datetime(revisions[1].created_at)) + self.assertEqual("2016-07-28T20:34:56Z", format_datetime(revisions[2].created_at)) + + self.assertEqual(False, revisions[0].deleted) + self.assertEqual(False, revisions[0].current) + self.assertEqual(False, revisions[1].deleted) + self.assertEqual(False, revisions[1].current) + self.assertEqual(False, revisions[2].deleted) + self.assertEqual(True, revisions[2].current) + + self.assertEqual("Cassie", revisions[0].user_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", revisions[0].user_id) + self.assertIsNone(revisions[1].user_name) + self.assertIsNone(revisions[1].user_id) + self.assertEqual("Cassie", revisions[2].user_name) + self.assertEqual("5de011f8-5aa9-4d5b-b991-f462c8dd6bb7", revisions[2].user_id) + + def test_delete_revision(self) -> None: + self.baseurl = self.server.workbooks.baseurl + workbook = TSC.WorkbookItem("project", "test") + workbook._id = "06b944d2-959d-4604-9305-12323c95e70e" + + with requests_mock.mock() as m: + m.delete(f"{self.baseurl}/{workbook.id}/revisions/3") + self.server.workbooks.delete_revision(workbook.id, "3") + + def test_download_revision(self) -> None: + with requests_mock.mock() as m, tempfile.TemporaryDirectory() as td: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/revisions/3/content", + headers={"Content-Disposition": 'name="tableau_datasource"; filename="Sample datasource.tds"'}, + ) + file_path = self.server.workbooks.download_revision("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", "3", td) + self.assertTrue(os.path.exists(file_path)) + + def test_bad_download_response(self) -> None: + with requests_mock.mock() as m, tempfile.TemporaryDirectory() as td: + m.get( + self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/content", + headers={"Content-Disposition": '''name="tableau_workbook"; filename*=UTF-8''"Sample workbook.twb"'''}, + ) + file_path = self.server.workbooks.download("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", td) + self.assertTrue(os.path.exists(file_path)) + + def test_odata_connection(self) -> None: + self.baseurl = self.server.workbooks.baseurl + workbook = TSC.WorkbookItem("project", "test") + workbook._id = "06b944d2-959d-4604-9305-12323c95e70e" + connection = TSC.ConnectionItem() + url = "https://odata.website.com/TestODataEndpoint" + connection.server_address = url + connection._connection_type = "odata" + connection._id = "17376070-64d1-4d17-acb4-a56e4b5b1768" + + creds = TSC.ConnectionCredentials("", "", True) + connection.connection_credentials = creds + with open(ODATA_XML, "rb") as f: + response_xml = f.read().decode("utf-8") + + with requests_mock.mock() as m: + m.put(f"{self.baseurl}/{workbook.id}/connections/{connection.id}", text=response_xml) + self.server.workbooks.update_connection(workbook, connection) + + history = m.request_history + + request = history[0] + xml = fromstring(request.body) + xml_connection = xml.find(".//connection") - with self.assertRaises(RuntimeError): - response = RequestFactory.Workbook._generate_xml(new_workbook, - connection_credentials=connection_creds, - connections=[connection1]) + assert xml_connection is not None + self.assertEqual(xml_connection.get("serverAddress"), url) diff --git a/test/test_workbook_model.py b/test/test_workbook_model.py index 69188fa4a..fc6423564 100644 --- a/test/test_workbook_model.py +++ b/test/test_workbook_model.py @@ -1,14 +1,9 @@ import unittest + import tableauserverclient as TSC class WorkbookModelTests(unittest.TestCase): - def test_invalid_project_id(self): - 
self.assertRaises(ValueError, TSC.WorkbookItem, None) - workbook = TSC.WorkbookItem("10") - with self.assertRaises(ValueError): - workbook.project_id = None - def test_invalid_show_tabs(self): workbook = TSC.WorkbookItem("10") with self.assertRaises(ValueError): diff --git a/versioneer.py b/versioneer.py old mode 100755 new mode 100644 index 59211ed6f..cce899f58 --- a/versioneer.py +++ b/versioneer.py @@ -276,7 +276,7 @@ """ -from __future__ import print_function + try: import configparser except ImportError: @@ -308,11 +308,13 @@ def get_root(): setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") + err = ( + "Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND')." + ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools @@ -325,8 +327,7 @@ def get_root(): me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + print(f"Warning: build in {os.path.dirname(me)} is using versioneer.py from {versioneer_py}") except NameError: pass return root @@ -340,7 +341,7 @@ def get_config_from_root(root): # the top of versioneer.py for instructions on writing your setup.cfg . 
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: + with open(setup_cfg) as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory @@ -348,6 +349,7 @@ def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None + cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" @@ -372,17 +374,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -390,12 +393,11 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None) + ) break - except EnvironmentError: + except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue @@ -405,7 +407,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, return None, None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print(f"unable to find command, tried {commands}") return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: @@ -418,7 +420,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, return stdout, p.returncode -LONG_VERSION_PY['git'] = ''' +LONG_VERSION_PY[ + "git" +] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -950,7 +954,7 @@ def git_get_keywords(versionfile_abs): # _version.py. keywords = {} try: - f = open(versionfile_abs, "r") + f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -965,7 +969,7 @@ def git_get_keywords(versionfile_abs): if mo: keywords["date"] = mo.group(1) f.close() - except EnvironmentError: + except OSError: pass return keywords @@ -989,11 +993,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit.
The old git %d @@ -1002,7 +1006,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -1010,19 +1014,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -1037,8 +1048,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -1046,10 +1056,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -1072,17 +1081,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -1091,10 +1099,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -1105,13 +1112,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -1139,13 +1144,13 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): files.append(versioneer_file) present = False try: - f = open(".gitattributes", "r") + f = open(".gitattributes") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() - except EnvironmentError: + except OSError: pass if not present: f = open(".gitattributes", "a+") @@ -1167,16 +1172,19 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print(f"Tried directories {rootdirs!s} but none started with prefix {parentdir_prefix}") raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -1203,13 +1211,11 @@ def versions_from_file(filename): try: with open(filename) as f: contents = f.read() - except EnvironmentError: + except OSError: raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) @@ -1218,12 +1224,11 @@ def versions_from_file(filename): def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" 
os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) + contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) - print("set %s to '%s'" % (filename, versions["version"])) + print(f"set {filename} to '{versions['version']}'") def plus_or_dot(pieces): @@ -1251,8 +1256,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -1366,11 +1370,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -1390,9 +1396,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } class VersioneerBadRootError(Exception): @@ -1415,8 +1425,7 @@ def get_versions(verbose=False): handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" + assert cfg.versionfile_source is not None, "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) @@ -1442,7 +1451,7 @@ def get_versions(verbose=False): try: ver = versions_from_file(versionfile_abs) if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) + print(f"got version from file {versionfile_abs} {ver}") return ver except NotThisMethod: pass @@ -1470,9 +1479,13 @@ def get_versions(verbose=False): if verbose: print("unable to compute version") - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } def get_version(): @@ -1521,6 +1534,7 @@ def run(self): print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1553,14 +1567,15 @@ def run(self): # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
from cx_Freeze.dist import build_exe as _build_exe + # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ @@ -1581,17 +1596,21 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["build_exe"] = cmd_build_exe del cmds["build_py"] - if 'py2exe' in sys.modules: # py2exe enabled? + if "py2exe" in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: @@ -1610,13 +1629,17 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments @@ -1643,8 +1666,8 @@ def make_release_tree(self, base_dir, files): # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + write_to_version_file(target_versionfile, self._versioneer_generated_versions) + cmds["sdist"] = cmd_sdist return cmds @@ -1699,11 +1722,9 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: + except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) + print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) @@ -1712,20 +1733,23 @@ def do_setup(): print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: - with open(ipy, "r") as f: + with open(ipy) as f: old = f.read() - except EnvironmentError: + except OSError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) @@ -1744,12 +1768,12 @@ def do_setup(): manifest_in = 
os.path.join(root, "MANIFEST.in") simple_includes = set() try: - with open(manifest_in, "r") as f: + with open(manifest_in) as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) - except EnvironmentError: + except OSError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so @@ -1762,8 +1786,7 @@ def do_setup(): else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) + print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: @@ -1781,7 +1804,7 @@ def scan_setup_py(): found = set() setters = False errors = 0 - with open("setup.py", "r") as f: + with open("setup.py") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import")