diff --git a/.docker/README.md b/.docker/README.md index 66b76b0f2d..5e18956d90 100644 --- a/.docker/README.md +++ b/.docker/README.md @@ -4,7 +4,7 @@ This directory is a space for mounting directories to docker containers, allowin ### postgres The `postgres` directory is mounted to `/docker-entrypoint-initdb.d`. Any `.sh` or `.sql` files will be executed when the container is first started with a new data volume. You may read more regarding this functionality on the [Docker Hub page](https://hub.docker.com/_/postgres), under _Initialization scripts_. -When running docker services through the Makefile commands, it specifies a docker-compose project name that depends on the name of the current git branch. This causes the volumes to change when the branch changes, which is helpful when switching between many branches that might have incompatible database schema changes. The downside is that whenever you start a new branch, you'll have to re-initialize the database again, like with `yarn run devsetup`. Creating a SQL dump from an existing, initialized database and placing it in this directory will allow you to skip this step. +When running docker services through the Makefile commands, the Makefile specifies a docker-compose project name that depends on the name of the current git branch. This causes the volumes to change when the branch changes, which is helpful when switching between many branches that might have incompatible database schema changes. The downside is that whenever you start a new branch, you'll have to re-initialize the database, for example with `pnpm run devsetup`. Creating a SQL dump from an existing, initialized database and placing it in this directory will allow you to skip this step. To create a SQL dump of your preferred database data useful for local testing, run `make .docker/postgres/init.sql` while the docker postgres container is running.
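A target like `make .docker/postgres/init.sql` typically reduces to running `pg_dump` inside the running container and redirecting the output into this directory. A minimal sketch of the equivalent shell command, assuming the compose service is named `postgres` and reusing the `learningequality` role configured elsewhere in this diff; the database name is a placeholder, so check the Makefile for the actual invocation:

    # Assumed service, user, and database names; -T disables the pseudo-tty so the redirect works
    docker compose exec -T postgres pg_dump --username=learningequality --no-owner kolibri-studio > .docker/postgres/init.sql

Since any `.sql` file in `.docker/postgres/` is executed on the first start of a fresh data volume, the dump is restored automatically the next time a branch-specific volume is created.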
diff --git a/.dockerignore b/.dockerignore index f5772ce6bb..233f6836f8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,4 +6,4 @@ __pycache__ *.pyc *.swp k8s/templates/ -cloudbuild-*.yaml \ No newline at end of file +cloudbuild-*.yaml diff --git a/.eslintrc.js b/.eslintrc.js index a9539d7f86..61d27d5e49 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -1,4 +1,4 @@ -const esLintConfig = require('kolibri-tools/.eslintrc'); +const esLintConfig = require('kolibri-format/.eslintrc'); esLintConfig.globals = { $: false, @@ -8,7 +8,18 @@ esLintConfig.globals = { MathJax: false, jest: false, }; -esLintConfig.settings['import/resolver']['webpack'] = { config: 'webpack.config.js'}; +esLintConfig.settings['import/resolver']['webpack'] = { config: require.resolve('./webpack.config.js')}; + +// Update resolver settings to allow for pnpm's symlinked structure +// https://github.com/import-js/eslint-plugin-import/issues/3110 +const nodeResolverSettings = esLintConfig.settings['import/resolver']['node']; +esLintConfig.settings['import/resolver']['node'] = { ...(nodeResolverSettings || {}), preserveSymlinks: false }; + +// Remove once Vuetify is gone; Vuetify uses too many class names that violate this rule +esLintConfig.rules['kolibri/vue-component-class-name-casing'] = 0; + +// Disabled: this rule flags v-text/v-html on components, which the codebase currently relies on +esLintConfig.rules['vue/no-v-text-v-html-on-component'] = 0; // Vuetify's helper attributes use hyphens and they would // not be recognized if auto-formatted to camel case diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..b9718afad7 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,6 @@ +# Run this command to always ignore formatting commits in `git blame` +# git config blame.ignoreRevsFile .git-blame-ignore-revs + +# Linting updates and fixes +a52e08e5c2031cecb97a03fbed49997756ebe01b +8ccaaa60efd1c07b220aefce5a307e4791345111 diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 0b2ccf668e..0000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,8 +0,0 @@ -blank_issues_enabled: false -contact_links: - - name: Studio GitHub Discussions - url: https://github.com/learningequality/studio/discussions - about: Please ask general questions about contributing to Studio or report development server issues here. - - name: Learning Equality Community Forum - url: https://community.learningequality.org/ - about: Ask and answer questions about Learning Equality's products and tools, share your experiences using Kolibri, and connect with users around the world.
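As the comments in `.git-blame-ignore-revs` note, `git config blame.ignoreRevsFile .git-blame-ignore-revs` makes `git blame` skip the listed formatting commits permanently. For a one-off invocation without touching config, the same standard `git blame` flag can be passed directly (the target file here is just an illustrative example from this diff):

    # One-off blame that skips the bulk linting commits listed in the file
    git blame --ignore-revs-file .git-blame-ignore-revs contentcuration/contentcuration/admin.py

Either way, lines are attributed to the commits that last made substantive changes rather than to the two bulk linting commits listed in the file.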
diff --git a/.github/ISSUE_TEMPLATE/enhancement_template.md b/.github/ISSUE_TEMPLATE/enhancement_template.md deleted file mode 100644 index 05f71ecdf8..0000000000 --- a/.github/ISSUE_TEMPLATE/enhancement_template.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: Enhancement request -about: Suggest an idea for Kolibri Studio -title: "[Title]: [Brief description]" ---- - - - -## Desired behavior - - - -## Current behavior - - - -## Value add - - - -## Possible tradeoffs - - - ---- - -## Add labels - diff --git a/.github/ISSUE_TEMPLATE/issue_template.md b/.github/ISSUE_TEMPLATE/issue_template.md deleted file mode 100644 index 0a91cc309f..0000000000 --- a/.github/ISSUE_TEMPLATE/issue_template.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -name: Submit an issue -about: Report a bug to help us improve -title: "[Title]: [Brief description]" ---- - - - -## Observed behavior - - - -## Expected behavior - - - -## User-facing consequences - - - -## Errors and logs - - -``` -01:10 info: something happened -01:12 error: something bad happened -``` - - -## Additional information - - - -## Steps to reproduce the issue -1. Step one -2. Step two -3. ... - - -## Usage Details - - - OS: - - Browser: - - URL: - - Other information that may be relevant: diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 24349f8d83..8f81bf7041 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -22,6 +22,12 @@ updates: babel: patterns: - "@babel/*" + tiptap: + patterns: + - "@tiptap/*" + jest: + patterns: + - "jest*" # Maintain dependencies for Github Actions - package-ecosystem: "github-actions" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 219cd954fc..0000000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,84 +0,0 @@ - - -## Summary -### Description of the change(s) you made - - - -### Manual verification steps performed -1. Step 1 -2. Step 2 -3. ... - -### Screenshots (if applicable) - - -### Does this introduce any tech-debt items? - -___ -## Reviewer guidance -### How can a reviewer test these changes? - - - -### Are there any risky areas that deserve extra testing? - - - -## References - - -## Comments - - ----- - -### Contributor's Checklist - - -PR process: - -- [ ] If this is an important user-facing change, PR or related issue the `CHANGELOG` label been added to this PR. Note: items with this label will be added to the [CHANGELOG](https://github.com/learningequality/studio/blob/master/CHANGELOG.md) at a later time -- [ ] If this includes an internal dependency change, a link to the diff is provided -- [ ] The `docs` label has been added if this introduces a change that needs to be updated in the [user docs](https://kolibri-studio.readthedocs.io/en/latest/index.html)? -- [ ] If any Python requirements have changed, the updated `requirements.txt` files also included in this PR -- [ ] Opportunities for using Google Analytics here are noted -- [ ] Migrations are [safe for a large db](https://www.braintreepayments.com/blog/safe-operations-for-high-volume-postgresql/) - -Studio-specifc: - -- [ ] All user-facing strings are translated properly -- [ ] The `notranslate` class been added to elements that shouldn't be translated by Google Chrome's automatic translation feature (e.g. 
icons, user-generated text) -- [ ] All UI components are LTR and RTL compliant -- [ ] Views are organized into `pages`, `components`, and `layouts` directories [as described in the docs](https://github.com/learningequality/studio/blob/vue-refactor/docs/architecture.md#where-does-the-frontend-code-live) -- [ ] Users' storage used is recalculated properly on any changes to main tree files -- [ ] If there new ways this uses user data that needs to be factored into our [Privacy Policy](https://github.com/learningequality/studio/tree/master/contentcuration/contentcuration/templates/policies/text), it has been noted. - - -Testing: - -- [ ] Code is clean and well-commented -- [ ] Contributor has fully tested the PR manually -- [ ] If there are any front-end changes, before/after screenshots are included -- [ ] Critical user journeys are covered by Gherkin stories -- [ ] Any new interactions have been added to the [QA Sheet](https://docs.google.com/spreadsheets/d/1HF4Gy6rb_BLbZoNkZEWZonKFBqPyVEiQq4Ve6XgIYmQ/edit#gid=0) -- [ ] Critical and brittle code paths are covered by unit tests -___ - -### Reviewer's Checklist -#### This section is for reviewers to fill out. - -- [ ] Automated test coverage is satisfactory -- [ ] PR is fully functional -- [ ] PR has been tested for [accessibility regressions](http://kolibri-dev.readthedocs.io/en/develop/manual_testing.html#accessibility-a11y-testing) -- [ ] External dependency files were updated if necessary (`yarn` and `pip`) -- [ ] Documentation is updated -- [ ] Contributor is in AUTHORS.md diff --git a/.github/workflows/call-contributor-issue-comment.yml b/.github/workflows/call-contributor-issue-comment.yml new file mode 100644 index 0000000000..4e8b7db1c3 --- /dev/null +++ b/.github/workflows/call-contributor-issue-comment.yml @@ -0,0 +1,14 @@ +name: Handle contributor comment on GitHub issue + +on: + issue_comment: + types: [created] + +jobs: + call-workflow: + uses: learningequality/.github/.github/workflows/contributor-issue-comment.yml@main + secrets: + LE_BOT_APP_ID: ${{ secrets.LE_BOT_APP_ID }} + LE_BOT_PRIVATE_KEY: ${{ secrets.LE_BOT_PRIVATE_KEY }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + SLACK_COMMUNITY_NOTIFICATIONS_WEBHOOK_URL: ${{ secrets.SLACK_COMMUNITY_NOTIFICATIONS_WEBHOOK_URL }} diff --git a/.github/workflows/call-manage-issue-header.yml b/.github/workflows/call-manage-issue-header.yml new file mode 100644 index 0000000000..4af6730ac2 --- /dev/null +++ b/.github/workflows/call-manage-issue-header.yml @@ -0,0 +1,13 @@ +name: Manage issue header + +on: + issues: + types: [opened, reopened, labeled, unlabeled] + +jobs: + call-workflow: + name: Call shared workflow + uses: learningequality/.github/.github/workflows/manage-issue-header.yml@main + secrets: + LE_BOT_APP_ID: ${{ secrets.LE_BOT_APP_ID }} + LE_BOT_PRIVATE_KEY: ${{ secrets.LE_BOT_PRIVATE_KEY }} diff --git a/.github/workflows/community-contribution-labeling.yml b/.github/workflows/community-contribution-labeling.yml new file mode 100644 index 0000000000..701465ba1c --- /dev/null +++ b/.github/workflows/community-contribution-labeling.yml @@ -0,0 +1,12 @@ +name: Community Contribution Label + +on: + issues: + types: [assigned, unassigned] + +jobs: + call-label-action: + uses: learningequality/.github/.github/workflows/community-contribution-label.yml@main + secrets: + LE_BOT_APP_ID: ${{ secrets.LE_BOT_APP_ID }} + LE_BOT_PRIVATE_KEY: ${{ secrets.LE_BOT_PRIVATE_KEY }} diff --git a/.github/workflows/containerbuild.yml b/.github/workflows/containerbuild.yml index 
361b0fad36..7b367f0eb0 100644 --- a/.github/workflows/containerbuild.yml +++ b/.github/workflows/containerbuild.yml @@ -57,7 +57,7 @@ jobs: DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index - name: Build and push Docker image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: ./docker file: ./docker/Dockerfile.postgres.dev @@ -97,7 +97,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build Docker image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: ./ file: ./k8s/images/nginx/Dockerfile diff --git a/.github/workflows/deploytest.yml b/.github/workflows/deploytest.yml index 71b3b9296c..b5c3fef9a3 100644 --- a/.github/workflows/deploytest.yml +++ b/.github/workflows/deploytest.yml @@ -20,7 +20,7 @@ jobs: uses: fkirc/skip-duplicate-actions@master with: github_token: ${{ github.token }} - paths: '["**.py", "requirements.txt", ".github/workflows/deploytest.yml", "**.vue", "**.js", "yarn.lock", "package.json"]' + paths: '["**.py", "requirements.txt", ".github/workflows/deploytest.yml", "**.vue", "**.js", "pnpm-lock.yaml", "package.json"]' build_assets: name: Build frontend assets needs: pre_job @@ -28,23 +28,19 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - name: Use pnpm + uses: pnpm/action-setup@v4 - name: Use Node.js uses: actions/setup-node@v4 with: - node-version: '16.x' - - name: Cache Node.js modules - uses: actions/cache@v4 - with: - path: '**/node_modules' - key: ${{ runner.OS }}-node-${{ hashFiles('**/yarn.lock') }} - restore-keys: | - ${{ runner.OS }}-node- + node-version: '20.x' + cache: 'pnpm' - name: Install dependencies run: | - yarn --frozen-lockfile - npm rebuild node-sass + pnpm install --frozen-lockfile + pnpm rebuild node-sass - name: Build frontend - run: yarn run build + run: pnpm run build make_messages: name: Build all message files needs: pre_job @@ -68,21 +64,17 @@ jobs: python -m pip install --upgrade pip pip install pip-tools pip-sync requirements.txt + - name: Use pnpm + uses: pnpm/action-setup@v4 - name: Use Node.js uses: actions/setup-node@v4 with: - node-version: '16.x' - - name: Cache Node.js modules - uses: actions/cache@v4 - with: - path: '**/node_modules' - key: ${{ runner.OS }}-node-${{ hashFiles('**/yarn.lock') }} - restore-keys: | - ${{ runner.OS }}-node- + node-version: '20.x' + cache: 'pnpm' - name: Install node dependencies run: | - yarn --frozen-lockfile - npm rebuild node-sass + pnpm install --frozen-lockfile + pnpm rebuild node-sass - name: Install gettext run: | sudo apt-get update -y diff --git a/.github/workflows/frontendlint.yml b/.github/workflows/frontendlint.yml deleted file mode 100644 index c28a80937a..0000000000 --- a/.github/workflows/frontendlint.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: Javascript Linting - -on: - push: - branches: - - unstable - - hotfixes - - master - pull_request: - -jobs: - pre_job: - name: Path match check - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - github_token: ${{ github.token }} - paths: '["**.vue", "**.js", "yarn.lock", ".github/workflows/frontendlint.yml"]' - test: - name: Frontend linting - needs: pre_job - if: ${{ needs.pre_job.outputs.should_skip != 'true' }} - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Use Node.js - uses: actions/setup-node@v4 - with: - node-version: '16.x' - - name: Cache Node.js modules - 
uses: actions/cache@v4 - with: - path: '**/node_modules' - key: ${{ runner.OS }}-node-${{ hashFiles('**/yarn.lock') }} - restore-keys: | - ${{ runner.OS }}-node- - - name: Install dependencies - run: | - yarn --frozen-lockfile - npm rebuild node-sass - - name: Run tests - run: yarn run lint-frontend:format - - name: Check for modified files - if: github.event.pull_request && github.event.pull_request.head.repo.full_name == github.repository - id: git-check - run: echo ::set-output name=modified::$(git diff-index --name-only HEAD) - - uses: tibdex/github-app-token@v2 - if: github.event.pull_request && github.event.pull_request.head.repo.full_name == github.repository && steps.git-check.outputs.modified != '' - id: generate-token - with: - app_id: ${{ secrets.CODE_FIX_APP_ID }} - private_key: ${{ secrets.CODE_FIX_APP_PRIVATE_KEY }} - - name: Push changes - if: github.event.pull_request && github.event.pull_request.head.repo.full_name == github.repository && steps.git-check.outputs.modified != '' - run: | - git config --global user.name 'Learning Equality' - git config --global user.email 'dev@learningequality.org' - git remote set-url origin https://x-access-token:${{ steps.generate-token.outputs.token }}@github.com/${{ github.repository }} - git commit -am "Frontend linting of ${{ steps.git-check.outputs.modified }}" - git push diff --git a/.github/workflows/frontendtest.yml b/.github/workflows/frontendtest.yml index e83ac316d8..f886ac8c6c 100644 --- a/.github/workflows/frontendtest.yml +++ b/.github/workflows/frontendtest.yml @@ -20,7 +20,7 @@ jobs: uses: fkirc/skip-duplicate-actions@master with: github_token: ${{ github.token }} - paths: '["**.vue", "**.js", "yarn.lock"]' + paths: '["**.vue", "**.js", "pnpm-lock.yaml"]' test: name: Frontend tests needs: pre_job @@ -28,20 +28,16 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - name: Use pnpm + uses: pnpm/action-setup@v4 - name: Use Node.js uses: actions/setup-node@v4 with: - node-version: '16.x' - - name: Cache Node.js modules - uses: actions/cache@v4 - with: - path: '**/node_modules' - key: ${{ runner.OS }}-node-${{ hashFiles('**/yarn.lock') }} - restore-keys: | - ${{ runner.OS }}-node- + node-version: '20.x' + cache: 'pnpm' - name: Install dependencies run: | - yarn --frozen-lockfile - npm rebuild node-sass + pnpm install --frozen-lockfile + pnpm rebuild node-sass - name: Run tests - run: yarn run test + run: pnpm run test diff --git a/.github/workflows/notify_team_new_comment.yml b/.github/workflows/notify_team_new_comment.yml deleted file mode 100644 index 6b6c1e21d7..0000000000 --- a/.github/workflows/notify_team_new_comment.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Send a slack notification when a contributor comments on issue - -on: - issue_comment: - types: [created] - -jobs: - contributor_issue_comment: - name: Contributor issue comment - - if: >- - ${{ - !github.event.issue.pull_request && - github.event.comment.author_association != 'MEMBER' && - github.event.comment.author_association != 'OWNER' - }} - - runs-on: ubuntu-latest - steps: - - name: Escape title double quotes - id: escape_title - env: - ISSUE_TITLE: ${{ github.event.issue.title }} - run: echo "ISSUE_TITLE=${ISSUE_TITLE//\"/\\\"}" >> "$GITHUB_OUTPUT" - - - name: Send message to Slack channel - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK - uses: slackapi/slack-github-action@v1.27.0 - with: - payload: | - { - "text": "*[Studio] New comment on issue: <${{ github.event.issue.html_url 
}}#issuecomment-${{ github.event.comment.id }}|${{ steps.escape_title.outputs.ISSUE_TITLE }} by ${{ github.event.comment.user.login }}>*" - } diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 0000000000..1bb5d71a03 --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,52 @@ +name: Linting + +on: + push: + branches: + - unstable + - hotfixes + - master + pull_request: + branches: + - unstable + - hotfixes + - master + +jobs: + pre_job: + name: Path match check + runs-on: ubuntu-latest + # Map a step output to a job output + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@master + with: + github_token: ${{ github.token }} + paths_ignore: '["**.po", "**.json"]' + linting: + name: All file linting + needs: pre_job + if: ${{ needs.pre_job.outputs.should_skip != 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.10' + - name: Use pnpm + uses: pnpm/action-setup@v4 + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: '20.x' + cache: 'pnpm' + - name: Install dependencies + run: | + pnpm install --frozen-lockfile + pnpm rebuild node-sass + - uses: pre-commit/action@v3.0.1 + - name: Run pre-commit-ci-lite + uses: pre-commit-ci/lite-action@v1.1.0 + if: always() diff --git a/.github/workflows/pythontest.yml b/.github/workflows/pythontest.yml index a4862cc657..ec99bec269 100644 --- a/.github/workflows/pythontest.yml +++ b/.github/workflows/pythontest.yml @@ -32,7 +32,7 @@ jobs: # Label used to access the service container postgres: # Docker Hub image - image: postgres:12 + image: postgres:16 # Provide the password for postgres env: POSTGRES_USER: learningequality @@ -68,7 +68,7 @@ jobs: -e "MINIO_ROOT_USER=development" \ -e "MINIO_ROOT_PASSWORD=development" \ -e "MINIO_DEFAULT_BUCKETS=content:public" \ - bitnami/minio:2024.5.28 + bitnamilegacy/minio:2024.5.28 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: diff --git a/.github/workflows/unassign-inactive.yaml b/.github/workflows/unassign-inactive.yaml new file mode 100644 index 0000000000..4151166a6d --- /dev/null +++ b/.github/workflows/unassign-inactive.yaml @@ -0,0 +1,15 @@ +name: "Unassign Inactive Contributors" +run-name: Unassign Inactive Contributors + +on: + schedule: + - cron: "1 0 * * 1" # Every Monday at 00:01 UTC + workflow_dispatch: + +jobs: + unassign-inactive: + uses: learningequality/.github/.github/workflows/unassign-inactive-issues.yaml@main + secrets: + LE_BOT_APP_ID: ${{ secrets.LE_BOT_APP_ID }} + LE_BOT_PRIVATE_KEY: ${{ secrets.LE_BOT_PRIVATE_KEY }} + SLACK_COMMUNITY_NOTIFICATIONS_WEBHOOK_URL: ${{ secrets.SLACK_COMMUNITY_NOTIFICATIONS_WEBHOOK_URL }} diff --git a/.github/workflows/update-pr-spreadsheet.yml b/.github/workflows/update-pr-spreadsheet.yml new file mode 100644 index 0000000000..8411239cd0 --- /dev/null +++ b/.github/workflows/update-pr-spreadsheet.yml @@ -0,0 +1,12 @@ +name: Update community pull requests spreadsheet +on: + pull_request_target: + types: [assigned,unassigned,opened,closed,reopened] + +jobs: + call-update-spreadsheet: + uses: learningequality/.github/.github/workflows/update-pr-spreadsheet.yml@main + secrets: + CONTRIBUTIONS_SPREADSHEET_ID: ${{ secrets.CONTRIBUTIONS_SPREADSHEET_ID }} + CONTRIBUTIONS_SHEET_NAME: ${{ secrets.CONTRIBUTIONS_SHEET_NAME }} + GH_UPLOADER_GCP_SA_CREDENTIALS: ${{ secrets.GH_UPLOADER_GCP_SA_CREDENTIALS }} diff 
--git a/.gitignore b/.gitignore index 64e2dc5733..9f9debd85c 100644 --- a/.gitignore +++ b/.gitignore @@ -27,8 +27,8 @@ var/ .vscode/ # IntelliJ IDE, except project config -.idea/* -!.idea/studio.iml +.idea/ +/*.iml # ignore future updates to run configuration .run/devserver.run.xml diff --git a/.htmlhintrc.js b/.htmlhintrc.js deleted file mode 100644 index 9c906cb345..0000000000 --- a/.htmlhintrc.js +++ /dev/null @@ -1,5 +0,0 @@ -const htmlHintConfig = require('kolibri-tools/.htmlhintrc'); -htmlHintConfig['id-class-value'] = false; -htmlHintConfig['--vue-component-conventions'] = false; -htmlHintConfig['id-class-value'] = false; -module.exports = htmlHintConfig; diff --git a/.idea/studio.iml b/.idea/studio.iml deleted file mode 100644 index 8fb5d94293..0000000000 --- a/.idea/studio.iml +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 03ae517775..3b663b0af5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,25 +1,91 @@ repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.2.1 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 hooks: - - id: trailing-whitespace - - id: flake8 - entry: flake8 --ignore E402,W503,W504,E123,E122,E126 - - id: check-added-large-files + - id: trailing-whitespace + - id: check-added-large-files exclude: '^.+?\.ttf$' - - id: debug-statements - - id: end-of-file-fixer + - id: debug-statements + - id: end-of-file-fixer exclude: '^.+?\.json.+?\.yml$' -- repo: https://github.com/asottile/reorder_python_imports - rev: v1.4.0 + - repo: https://github.com/PyCQA/flake8 + rev: 7.1.2 hooks: - - id: reorder-python-imports + - id: flake8 + additional_dependencies: [ + 'flake8-print==5.0.0' + ] + - repo: https://github.com/asottile/reorder_python_imports + rev: v3.14.0 + hooks: + - id: reorder-python-imports language_version: python3 -- repo: local + - repo: local + hooks: + - id: frontend-lint + name: Linting of JS, Vue, SCSS and CSS files + description: This hook handles all frontend linting for Kolibri Studio + entry: pnpm run lint-frontend:format + language: system + files: \.(js|vue|scss|css)$ + - repo: local + hooks: + - id: no-auto-migrations + name: no auto-named migrations + entry: We do not allow _auto_ in migration names. Please give the migration a telling name. 
+ language: fail + files: .*/migrations/.*_auto_.*\.py$ + exclude: (?x)^( + contentcuration/kolibri_content/migrations/0014_auto_20210603_1536.py| + contentcuration/kolibri_content/migrations/0023_auto_20250417_1516.py| + contentcuration/kolibri_content/migrations/0007_auto_20200613_0050.py| + contentcuration/kolibri_content/migrations/0004_auto_20180910_2342.py| + contentcuration/kolibri_content/migrations/0002_auto_20180327_1414.py| + contentcuration/kolibri_content/migrations/0022_auto_20240915_1414.py| + contentcuration/kolibri_content/migrations/0011_auto_20210504_1744.py| + contentcuration/kolibri_content/migrations/0010_auto_20210202_0604.py| + contentcuration/kolibri_content/migrations/0018_auto_20220224_2031.py| + contentcuration/kolibri_content/migrations/0019_auto_20230207_0116.py| + contentcuration/kolibri_content/migrations/0005_auto_20190424_1709.py| + contentcuration/kolibri_content/migrations/0006_auto_20191028_2325.py| + contentcuration/kolibri_content/migrations/0015_auto_20210707_1606.py| + contentcuration/kolibri_content/migrations/0013_auto_20210519_1759.py| + contentcuration/kolibri_content/migrations/0012_auto_20210511_1605.py| + contentcuration/kolibri_content/migrations/0021_auto_20240612_1847.py| + contentcuration/search/migrations/0002_auto_20201215_2110.py| + contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py| + contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py| + contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py| + contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py| + contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py| + contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py| + contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py| + contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py| + contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py| + contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py| + contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py| + contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py| + contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py| + contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py| + contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py| + contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py| + contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py| + contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py| + contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py| + contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py| + contentcuration/contentcuration/migrations/0151_auto_20250417_1516.py| + contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py| + contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py| + contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py| + contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py| + contentcuration/kolibri_public/migrations/0004_auto_20240612_1847.py| + contentcuration/kolibri_public/migrations/0006_auto_20250417_1516.py| + )$ + # Always keep black as the final hook, so its formatting wins over any reformatting done by earlier hooks.
+ - repo: https://github.com/python/black + rev: 20.8b1 hooks: - - id: frontend-lint - name: Linting of JS, Vue, SCSS and CSS files - description: This hook handles all frontend linting for Kolibri Studio - entry: yarn run lint-frontend:format - language: system - files: \.(js|vue|scss|less|css)$ + - id: black + additional_dependencies: [ + 'click==8.0.4' + ] diff --git a/.prettierrc.js b/.prettierrc.js deleted file mode 120000 index f425df857f..0000000000 --- a/.prettierrc.js +++ /dev/null @@ -1 +0,0 @@ -./node_modules/kolibri-tools/.prettierrc.js \ No newline at end of file diff --git a/.prettierrc.js b/.prettierrc.js new file mode 100644 index 0000000000..aa0587e6af --- /dev/null +++ b/.prettierrc.js @@ -0,0 +1 @@ +module.exports = require('kolibri-format/.prettierrc'); diff --git a/.stylelintrc.js b/.stylelintrc.js index 6ac1911a5a..9b7b1077f7 100644 --- a/.stylelintrc.js +++ b/.stylelintrc.js @@ -1,6 +1,6 @@ module.exports = { extends: [ - 'kolibri-tools/.stylelintrc', + 'kolibri-format/.stylelintrc', ], rules: { /* @@ -8,8 +8,14 @@ module.exports = { * Inline comments explain why rule is ignored */ 'selector-max-id': null, // This would require a major refactor - 'at-rule-no-unknown': null, // we're using LESS - 'scss/at-rule-no-unknown': null, // we're using LESS - 'csstree/validator': null // this triggers issues with unknown at rules too. + 'csstree/validator': null, // this triggers issues with unknown at rules too. + 'selector-pseudo-element-no-unknown': [ + true, + { + // In Vue 2.6 and later, `::v-deep` is used for deep selectors. + // This rule allows `::v-deep` to prevent linting errors. + ignorePseudoElements: ['v-deep'], + } + ] }, }; diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 35d0e2c4c1..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,28 +0,0 @@ - -## How can I contribute? - -1. 📙 **Skim through the [Developer documentation](./docs/_index.md)** to understand where to refer later on. -2. 💻 **Follow the [Local development instructions](./docs/local_dev_docker.md) to set up your development server.** -3. 🔍 **Search for issues tagged as [help wanted](https://github.com/learningequality/studio/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22+no%3Aassignee) or [good first issue](https://github.com/learningequality/studio/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22+no%3Aassignee).** -4. 🗣️ **Ask us for an assignment in the comments of an issue you've chosen.** Please request assignment of a reasonable amount of issues at a time. Once you finish your current issue or two, you are welcome to ask for more. - -**❓ Where to ask questions** - -- For anything development related, refer to the [Developer documentation](./docs/_index.md) at first. Some answers may already be there. -- For questions related to a specific issue or assignment requests, use the corresponding issue's comments section. -- Visit [GitHub Discussions](https://github.com/learningequality/studio/discussions) to ask about anything related to contributing or to troubleshoot development server issues. - -**👥 How to connect** - -- We encourage you to visit [GitHub Discussions](https://github.com/learningequality/studio/discussions) to connect with the Learning Equality team as well as with other contributors. -- If you'd like to contribute on a regular basis, we are happy to invite you to our open-source community Slack channel. Get in touch with us at info@learningequality.org to receive an invitation. 
- ---- - -🕖 Please allow us a few days to reply to your comments. If you don't hear from us within a week, reach out via [GitHub Discussions](https://github.com/learningequality/studio/discussions). - -As soon as you open a pull request, it may take us a week or two to review it as we're a small team. We appreciate your contribution and will provide feedback. - ---- - -*Thank you for your interest in contributing! Learning Equality was founded by volunteers dedicated to helping make educational materials more accessible to those in need, and every contribution makes a difference.* diff --git a/LICENSE b/LICENSE index b10a6fd5cb..c0688ab8ea 100644 --- a/LICENSE +++ b/LICENSE @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/Makefile b/Makefile index 051053bab3..002d337323 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ reconcile: ############################################################### i18n-extract-frontend: # generate frontend messages - yarn makemessages + pnpm makemessages i18n-extract-backend: # generate backend messages @@ -75,7 +75,7 @@ i18n-extract-backend: i18n-extract: i18n-extract-frontend i18n-extract-backend i18n-transfer-context: - yarn transfercontext + pnpm transfercontext i18n-django-compilemessages: # Change working directory to contentcuration/ such that compilemessages @@ -94,9 +94,9 @@ i18n-pretranslate-approve-all: i18n-download-translations: python node_modules/kolibri-tools/lib/i18n/crowdin.py rebuild-translations ${branch} python node_modules/kolibri-tools/lib/i18n/crowdin.py download-translations ${branch} - yarn exec kolibri-tools i18n-code-gen -- --output-dir ./contentcuration/contentcuration/frontend/shared/i18n + pnpm exec kolibri-tools i18n-code-gen -- --output-dir ./contentcuration/contentcuration/frontend/shared/i18n $(MAKE) i18n-django-compilemessages - yarn exec kolibri-tools i18n-create-message-files -- --namespace contentcuration --searchPath ./contentcuration/contentcuration/frontend + pnpm exec kolibri-tools i18n-create-message-files -- --namespace contentcuration --searchPath ./contentcuration/contentcuration/frontend i18n-download: i18n-download-translations diff --git a/README.md b/README.md index 362093df9b..c925617db2 100644 --- a/README.md +++ b/README.md @@ -13,35 +13,11 @@ Kolibri Studio uses the [Django framework](https://www.djangoproject.com/) for t If you are looking for help setting up custom content channels, uploading and organizing resources using Kolibri Studio, please refer to the [User Guide](https://kolibri-studio.readthedocs.io/en/latest/). - ## How can I contribute? -1. 📙 **Skim through the [Developer documentation](./docs/_index.md)** to understand where to refer later on. -2. 💻 **Follow the [Local development instructions](./docs/local_dev_docker.md) to set up your development server.** -3. 🔍 **Search for issues tagged as [help wanted](https://github.com/learningequality/studio/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22+no%3Aassignee) or [good first issue](https://github.com/learningequality/studio/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22+no%3Aassignee).** -4. 🗣️ **Ask us for an assignment in the comments of an issue you've chosen.** Please request assignment of a reasonable amount of issues at a time. 
Once you finish your current issue or two, you are welcome to ask for more. - -**❓ Where to ask questions** - -- For anything development related, refer to the [Developer documentation](./docs/_index.md) at first. Some answers may already be there. -- For questions related to a specific issue or assignment requests, use the corresponding issue's comments section. -- Visit [GitHub Discussions](https://github.com/learningequality/studio/discussions) to ask about anything related to contributing or to troubleshoot development server issues. - -**👥 How to connect** - -- We encourage you to visit [GitHub Discussions](https://github.com/learningequality/studio/discussions) to connect with the Learning Equality team as well as with other contributors. -- If you'd like to contribute on a regular basis, we are happy to invite you to our open-source community Slack channel. Get in touch with us at info@learningequality.org to receive an invitation. - ---- - -🕖 Please allow us a few days to reply to your comments. If you don't hear from us within a week, reach out via [GitHub Discussions](https://github.com/learningequality/studio/discussions). - -As soon as you open a pull request, it may take us a week or two to review it as we're a small team. We appreciate your contribution and will provide feedback. - ---- - -*Thank you for your interest in contributing! Learning Equality was founded by volunteers dedicated to helping make educational materials more accessible to those in need, and every contribution makes a difference.* +We welcome contributors! +To find out how to contribute, visit [Contributing to our open code base](https://learningequality.org/contributing-to-our-open-code-base). ## Licensing Kolibri Studio is licensed under the MIT license. See [LICENSE](./LICENSE) for more details. diff --git a/contentcuration/automation/admin.py b/contentcuration/automation/admin.py index 4185d360e9..5d28852b15 100644 --- a/contentcuration/automation/admin.py +++ b/contentcuration/automation/admin.py @@ -1,3 +1,2 @@ # from django.contrib import admin - # Register your models here. 
diff --git a/contentcuration/automation/apps.py b/contentcuration/automation/apps.py index eaa1d3d4e1..0fbbd020ac 100644 --- a/contentcuration/automation/apps.py +++ b/contentcuration/automation/apps.py @@ -2,5 +2,5 @@ class AutomationConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'automation' + default_auto_field = "django.db.models.BigAutoField" + name = "automation" diff --git a/contentcuration/automation/migrations/0001_initial.py b/contentcuration/automation/migrations/0001_initial.py new file mode 100644 index 0000000000..6b62bc0ae7 --- /dev/null +++ b/contentcuration/automation/migrations/0001_initial.py @@ -0,0 +1,69 @@ +# Generated by Django 3.2.24 on 2025-03-26 11:12 +import uuid + +import django.db.models.deletion +from django.db import migrations +from django.db import models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("kolibri_public", "0005_alter_localfile_extension"), + ] + + operations = [ + migrations.CreateModel( + name="RecommendationsCache", + fields=[ + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("request_hash", models.CharField(max_length=32, null=True)), + ("topic_id", models.UUIDField()), + ("rank", models.IntegerField(default=0, null=True)), + ("override_threshold", models.BooleanField(default=False)), + ("timestamp", models.DateTimeField(auto_now_add=True)), + ( + "channel", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_recommendations", + to="contentcuration.channel", + ), + ), + ( + "contentnode", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="contentnode_recommendations", + to="kolibri_public.contentnode", + ), + ), + ], + ), + migrations.AddIndex( + model_name="recommendationscache", + index=models.Index(fields=["request_hash"], name="request_hash_idx"), + ), + migrations.AddIndex( + model_name="recommendationscache", + index=models.Index(fields=["contentnode"], name="contentnode_idx"), + ), + migrations.AlterUniqueTogether( + name="recommendationscache", + unique_together={("request_hash", "contentnode")}, + ), + ] diff --git a/contentcuration/automation/models.py b/contentcuration/automation/models.py index 0b4331b362..5739145914 100644 --- a/contentcuration/automation/models.py +++ b/contentcuration/automation/models.py @@ -1,3 +1,40 @@ -# from django.db import models +import uuid -# Create your models here. 
+from django.db import models +from kolibri_public.models import ContentNode + +from contentcuration.models import Channel + + +REQUEST_HASH_INDEX_NAME = "request_hash_idx" +CONTENTNODE_INDEX_NAME = "contentnode_idx" + + +class RecommendationsCache(models.Model): + id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) + request_hash = models.CharField(max_length=32, null=True) + topic_id = models.UUIDField() + contentnode = models.ForeignKey( + ContentNode, + null=True, + blank=True, + related_name="contentnode_recommendations", + on_delete=models.CASCADE, + ) + channel = models.ForeignKey( + Channel, + null=True, + blank=True, + related_name="channel_recommendations", + on_delete=models.CASCADE, + ) + rank = models.IntegerField(default=0, null=True) + override_threshold = models.BooleanField(default=False) + timestamp = models.DateTimeField(auto_now_add=True) + + class Meta: + unique_together = ("request_hash", "contentnode") + indexes = [ + models.Index(fields=["request_hash"], name=REQUEST_HASH_INDEX_NAME), + models.Index(fields=["contentnode"], name=CONTENTNODE_INDEX_NAME), + ] diff --git a/contentcuration/automation/tests.py b/contentcuration/automation/tests.py index a79ca8be56..601fc8616b 100644 --- a/contentcuration/automation/tests.py +++ b/contentcuration/automation/tests.py @@ -1,3 +1,2 @@ # from django.test import TestCase - # Create your tests here. diff --git a/contentcuration/automation/tests/appnexus/test_base.py b/contentcuration/automation/tests/appnexus/test_base.py index 7944e00e4f..e29358e45c 100644 --- a/contentcuration/automation/tests/appnexus/test_base.py +++ b/contentcuration/automation/tests/appnexus/test_base.py @@ -1,48 +1,98 @@ -import pytest +import time +from unittest.mock import patch -from automation.utils.appnexus.base import Adapter +import mock +import pytest +import requests from automation.utils.appnexus.base import Backend +from automation.utils.appnexus.base import BackendRequest +from automation.utils.appnexus.base import BackendResponse +from automation.utils.appnexus.base import SessionWithMaxConnectionAge +from automation.utils.appnexus.errors import ConnectionError +from automation.utils.appnexus.errors import InvalidResponse + + +def test_session_with_max_connection_age_request(): + with patch.object(requests.Session, "request") as mock_request: + session = SessionWithMaxConnectionAge() + session.request("GET", "https://example.com") + assert mock_request.call_count == 1 + + +def test_session_with_max_connection_age_not_closing_connections(): + with patch.object(requests.Session, "close") as mock_close, patch.object( + requests.Session, "request" + ) as mock_request: + session = SessionWithMaxConnectionAge(60) + session.request("GET", "https://example.com") + time.sleep(0.1) + session.request("GET", "https://example.com") + + assert mock_close.call_count == 0 + assert mock_request.call_count == 2 + + +def test_session_with_max_connection_age_closing_connections(): + with patch.object(requests.Session, "close") as mock_close, patch.object( + requests.Session, "request" + ) as mock_request: + session = SessionWithMaxConnectionAge(1) + session.request("GET", "https://example.com") + time.sleep(2) + session.request("GET", "https://example.com") + + assert mock_close.call_count == 1 + assert mock_request.call_count == 2 + + +@mock.patch("automation.utils.appnexus.base.Backend.connect") +def test_backend_connect(mock_connect): + mock_connect.return_value = True + + backend = Backend() + result = backend.connect() + 
mock_connect.assert_called_once() + assert result is True -class MockBackend(Backend): - def connect(self) -> None: - return super().connect() - def make_request(self, request): - return super().make_request(request) +@mock.patch("automation.utils.appnexus.base.Backend.connect") +def test_backend_connect_error(mock_connect): + mock_connect.side_effect = [ConnectionError("Failed to connect"), False] - @classmethod - def _create_instance(cls) -> 'MockBackend': - return cls() + backend = Backend() + with pytest.raises(ConnectionError) as exc_info: + backend.connect() + assert str(exc_info.value) == "Failed to connect" -class MockAdapter(Adapter): - def mockoperation(self): - pass + result = backend.connect() + assert result is False + assert mock_connect.call_count == 2 -def test_backend_error(): - with pytest.raises(NotImplementedError) as error: - Backend.get_instance() - assert "Subclasses should implement the creation of instance" in str(error.value) -def test_backend_singleton(): - b1, b2 = MockBackend.get_instance(), MockBackend.get_instance() - assert id(b1) == id(b2) +@mock.patch("automation.utils.appnexus.base.Backend.make_request") +def test_backend_request(mock_make_request): + mock_response = BackendResponse(data=[{"key": "value"}]) + mock_make_request.return_value = mock_response + backend = Backend() + request = BackendRequest(method="GET", path="/api/test") + response = backend.make_request(request) -def test_adapter_creation(): - a = MockAdapter(backend=MockBackend) - assert isinstance(a, Adapter) + assert response == mock_response + mock_make_request.assert_called_once_with(request) -def test_adapter_backend_default(): - b = MockBackend() - adapter = Adapter(backend=b) - assert isinstance(adapter.backend, Backend) +@mock.patch("automation.utils.appnexus.base.Backend.make_request") +def test_backend_request_error(mock_make_request): + mock_make_request.side_effect = InvalidResponse("Request failed") + backend = Backend() + request = BackendRequest(method="GET", path="/api/test") -def test_adapter_backend_custom(): - b = MockBackend() - a = Adapter(backend=b) - assert a.backend is b + with pytest.raises(InvalidResponse) as exc_info: + backend.make_request(request) + assert str(exc_info.value) == "Request failed" + mock_make_request.assert_called_once_with(request) diff --git a/contentcuration/automation/tests/test_recommendations_cache_model.py b/contentcuration/automation/tests/test_recommendations_cache_model.py new file mode 100644 index 0000000000..535e45c21e --- /dev/null +++ b/contentcuration/automation/tests/test_recommendations_cache_model.py @@ -0,0 +1,74 @@ +import uuid + +from automation.models import RecommendationsCache +from django.db import IntegrityError +from kolibri_public.models import ContentNode + +from contentcuration.models import Channel +from contentcuration.tests.base import StudioTestCase + + +class TestRecommendationsCache(StudioTestCase): + def setUp(self): + self.topic_id = uuid.uuid4() + self.content_node = ContentNode.objects.create( + id=uuid.uuid4(), + title="Test Content Node", + content_id=uuid.uuid4(), + channel_id=uuid.uuid4(), + ) + self.channel = Channel.objects.create( + id=uuid.uuid4(), + name="Test Channel", + actor_id=1, + ) + self.cache = RecommendationsCache.objects.create( + request_hash="test_hash", + topic_id=self.topic_id, + contentnode=self.content_node, + channel=self.channel, + rank=1, + override_threshold=False, + ) + + def test_cache_creation(self): + self.assertIsInstance(self.cache, RecommendationsCache) + 
self.assertEqual(self.cache.request_hash, "test_hash") + self.assertEqual(self.cache.topic_id, self.topic_id) + self.assertEqual(self.cache.contentnode, self.content_node) + self.assertEqual(self.cache.channel, self.channel) + self.assertEqual(self.cache.rank, 1) + self.assertFalse(self.cache.override_threshold) + + def test_cache_retrieval(self): + retrieved_cache = RecommendationsCache.objects.get(request_hash="test_hash") + self.assertEqual(retrieved_cache, self.cache) + + def test_cache_uniqueness(self): + with self.assertRaises(IntegrityError): + RecommendationsCache.objects.create( + request_hash="test_hash", + topic_id=self.topic_id, + contentnode=self.content_node, + channel=self.channel, + rank=2, + override_threshold=True, + ) + + def test_bulk_create_ignore_conflicts_true(self): + initial_count = RecommendationsCache.objects.count() + try: + RecommendationsCache.objects.bulk_create( + [self.cache, self.cache], ignore_conflicts=True + ) + except IntegrityError: + self.fail("bulk_create raised IntegrityError unexpectedly!") + + final_count = RecommendationsCache.objects.count() + self.assertEqual(initial_count, final_count) + + def test_bulk_create_ignore_conflicts_false(self): + with self.assertRaises(IntegrityError): + RecommendationsCache.objects.bulk_create( + [self.cache, self.cache], ignore_conflicts=False + ) diff --git a/contentcuration/automation/utils/appnexus/base.py b/contentcuration/automation/utils/appnexus/base.py index ab9e6d5096..c242593feb 100644 --- a/contentcuration/automation/utils/appnexus/base.py +++ b/contentcuration/automation/utils/appnexus/base.py @@ -1,46 +1,186 @@ +import logging +import time from abc import ABC from abc import abstractmethod -from builtins import NotImplementedError + +import requests +from requests.adapters import HTTPAdapter +from urllib3 import Retry + +from . import errors + + +class SessionWithMaxConnectionAge(requests.Session): + """ + Session with a maximum connection age. If the connection is older than the specified age, it will be closed and a new one will be created. + The age is specified in seconds. 
+ """ + + def __init__(self, age=100): + super().__init__() + self.age = age + self.last_used = time.time() + + def request(self, *args, **kwargs): + current_time = time.time() + if current_time - self.last_used > self.age: + self.close() + + self.last_used = current_time + + return super().request(*args, **kwargs) class BackendRequest(object): - """ Class that should be inherited by specific backend for its requests""" - pass + """ Class that holds the request information for the backend """ + + def __init__( + self, + method, + path, + params=None, + data=None, + json=None, + headers=None, + timeout=(5, 100), + **kwargs, + ): + self.method = method + self.path = path + self.params = params + self.data = data + self.json = json + self.headers = headers + self.timeout = timeout + for key, value in kwargs.items(): + setattr(self, key, value) class BackendResponse(object): """ Class that should be inherited by specific backend for its responses""" - pass + + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) class Backend(ABC): """ An abstract base class for backend interfaces that also implements the singleton pattern """ - _instance = None - - def __new__(class_, *args, **kwargs): - if not isinstance(class_._instance, class_): - class_._instance = object.__new__(class_, *args, **kwargs) - return class_._instance - @abstractmethod - def connect(self) -> None: + _instance = None + session = None + base_url = None + connect_endpoint = None + max_retries = 1 + backoff_factor = 0.3 + + def __new__(cls, *args, **kwargs): + if not isinstance(cls._instance, cls): + cls._instance = object.__new__(cls) + return cls._instance + + def __init__( + self, + url_prefix="stable", + ): + self.url_prefix = url_prefix + if not self.session: + self._setup_session() + + def _setup_session(self): + self.session = SessionWithMaxConnectionAge() + + retry = Retry( + total=self.max_retries, + backoff_factor=self.backoff_factor, + ) + adapter = HTTPAdapter(max_retries=retry) + + self.session.mount("https://", adapter) + self.session.mount("http://", adapter) + + def _construct_full_url(self, path): + """This method combine base_url, url_prefix, and path in that order, removing any trailing and leading slashes.""" + url_array = [] + if self.base_url: + url_array.append(self.base_url.rstrip("/")) + if self.url_prefix: + url_array.append(self.url_prefix.rstrip("/").lstrip("/")) + if path: + url_array.append(path.lstrip("/")) + return "/".join(url_array) + + def _make_request(self, request): + url = self._construct_full_url(request.path) + try: + response = self.session.request( + request.method, + url, + params=request.params, + data=request.data, + headers=request.headers, + json=request.json, + timeout=request.timeout, + ) + response.raise_for_status() + return response + except ( + requests.exceptions.ConnectionError, + requests.exceptions.RequestException, + requests.exceptions.SSLError, + ) as e: + logging.exception(e) + raise errors.ConnectionError(f"Unable to connect to {url}") + except ( + requests.exceptions.Timeout, + requests.exceptions.ConnectTimeout, + requests.exceptions.ReadTimeout, + ) as e: + logging.exception(e) + raise errors.TimeoutError(f"Timeout occurred while connecting to {url}") + except ( + requests.exceptions.TooManyRedirects, + requests.exceptions.HTTPError, + ) as e: + logging.exception(e) + raise errors.HttpError(f"HTTP error occurred while connecting to {url}") + except ( + requests.exceptions.URLRequired, + requests.exceptions.MissingSchema, + 
requests.exceptions.InvalidSchema, + requests.exceptions.InvalidURL, + requests.exceptions.InvalidHeader, + requests.exceptions.InvalidJSONError, + ) as e: + logging.exception(e) + raise errors.InvalidRequest(f"Invalid request to {url}") + except ( + requests.exceptions.ContentDecodingError, + requests.exceptions.ChunkedEncodingError, + ) as e: + logging.exception(e) + raise errors.InvalidResponse(f"Invalid response from {url}") + + def connect(self, **kwargs): """ Establishes a connection to the backend service. """ - pass - - @abstractmethod - def make_request(self, request) -> BackendResponse: - """ Make a request based on "request" """ - pass - - @classmethod - def get_instance(cls) -> 'Backend': - """ Returns existing instance, if not then create one. """ - return cls._instance if cls._instance else cls._create_instance() - - @classmethod - def _create_instance(cls) -> 'Backend': - """ Returns the instance after creating it. """ - raise NotImplementedError("Subclasses should implement the creation of instance") + try: + request = BackendRequest(method="GET", path=self.connect_endpoint, **kwargs) + api_response = self._make_request(request) + response_data = api_response.json() + status = response_data.get("status", None) + return status == "OK" + except Exception: + return False + + def make_request(self, request): + """ Make a request to the backend service. """ + try: + api_response = self._make_request(request) + response_data = api_response.json() + return BackendResponse(data=response_data) + except ValueError as e: + logging.exception(e) + raise errors.InvalidResponse("Invalid response from backend") class BackendFactory(ABC): diff --git a/contentcuration/automation/utils/appnexus/errors.py b/contentcuration/automation/utils/appnexus/errors.py new file mode 100644 index 0000000000..34ef92f749 --- /dev/null +++ b/contentcuration/automation/utils/appnexus/errors.py @@ -0,0 +1,18 @@ +class ConnectionError(Exception): + pass + + +class TimeoutError(Exception): + pass + + +class HttpError(Exception): + pass + + +class InvalidRequest(Exception): + pass + + +class InvalidResponse(Exception): + pass diff --git a/contentcuration/automation/views.py b/contentcuration/automation/views.py index fd0e044955..3e6a05d4a3 100644 --- a/contentcuration/automation/views.py +++ b/contentcuration/automation/views.py @@ -1,3 +1,2 @@ # from django.shortcuts import render - # Create your views here. diff --git a/contentcuration/contentcuration/__init__.py b/contentcuration/contentcuration/__init__.py index d13e951393..b6fc8176d9 100644 --- a/contentcuration/contentcuration/__init__.py +++ b/contentcuration/contentcuration/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - # This will make sure the app is always imported when # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa diff --git a/contentcuration/contentcuration/admin.py b/contentcuration/contentcuration/admin.py index 37b76372c9..d64457522d 100644 --- a/contentcuration/contentcuration/admin.py +++ b/contentcuration/contentcuration/admin.py @@ -9,8 +9,13 @@ class UserAdmin(admin.ModelAdmin): - list_display = ('first_name', 'last_name', 'email', 'date_joined',) - date_hierarchy = 'date_joined' + list_display = ( + "first_name", + "last_name", + "email", + "date_joined", + ) + date_hierarchy = "date_joined" admin.site.register(User, UserAdmin) diff --git a/contentcuration/contentcuration/api.py b/contentcuration/contentcuration/api.py index b297ffaba6..77b8f7b054 100644 --- a/contentcuration/contentcuration/api.py +++ b/contentcuration/contentcuration/api.py @@ -25,7 +25,9 @@ def write_file_to_storage(fobj, check_valid=False, name=None): fobj.seek(0) if check_valid and hashed_filename != filename: - raise SuspiciousOperation("Failed to upload file {0}: hash is invalid".format(name)) + raise SuspiciousOperation( + "Failed to upload file {0}: hash is invalid".format(name) + ) # Get location of file file_path = models.generate_object_storage_name(hashed_filename, full_filename) @@ -33,7 +35,11 @@ def write_file_to_storage(fobj, check_valid=False, name=None): # Write file storage = default_storage if storage.exists(file_path): - logging.info("{} exists in Google Cloud Storage, so it's not saved again.".format(file_path)) + logging.info( + "{} exists in Google Cloud Storage, so it's not saved again.".format( + file_path + ) + ) else: storage.save(file_path, fobj) return full_filename @@ -52,7 +58,11 @@ def write_raw_content_to_storage(contents, ext=None): # Write file storage = default_storage if storage.exists(file_path): - logging.info("{} exists in Google Cloud Storage, so it's not saved again.".format(file_path)) + logging.info( + "{} exists in Google Cloud Storage, so it's not saved again.".format( + file_path + ) + ) else: storage.save(file_path, BytesIO(contents)) diff --git a/contentcuration/contentcuration/apps.py b/contentcuration/contentcuration/apps.py index 6f344aa63d..06d62d9561 100644 --- a/contentcuration/contentcuration/apps.py +++ b/contentcuration/contentcuration/apps.py @@ -2,7 +2,7 @@ class ContentConfig(AppConfig): - name = 'contentcuration' + name = "contentcuration" def ready(self): # Import signals diff --git a/contentcuration/contentcuration/celery.py b/contentcuration/contentcuration/celery.py index d9186c2aed..9f74f2d2fc 100644 --- a/contentcuration/contentcuration/celery.py +++ b/contentcuration/contentcuration/celery.py @@ -3,9 +3,12 @@ from django.conf import settings from contentcuration.utils.celery.app import CeleryApp +from contentcuration.utils.celery.tasks import CeleryTask # set the default Django settings module for the 'celery' program. 
 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")

-app = CeleryApp("contentcuration")
+# Celery update now requires that we pass the task_cls to the CeleryApp, instead
+# of setting it as an attribute on our custom Celery class
+app = CeleryApp("contentcuration", task_cls=CeleryTask)
 app.config_from_object(settings.CELERY)
diff --git a/contentcuration/contentcuration/collectstatic_settings.py b/contentcuration/contentcuration/collectstatic_settings.py
index dae329577e..038f204069 100644
--- a/contentcuration/contentcuration/collectstatic_settings.py
+++ b/contentcuration/contentcuration/collectstatic_settings.py
@@ -1,6 +1,6 @@
-# Settings used by containers running collectstatic. Scope our services
 # to the only ones needed to run collectstatic.
-
+# flake8: noqa: F403, F405
 from .settings import *

-CACHES['default']['BACKEND'] = "django_prometheus.cache.backends.locmem.LocMemCache"
+CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.locmem.LocMemCache"
diff --git a/contentcuration/contentcuration/constants/completion_criteria.py b/contentcuration/contentcuration/constants/completion_criteria.py
index ffed5d7821..1a8c101e38 100644
--- a/contentcuration/contentcuration/constants/completion_criteria.py
+++ b/contentcuration/contentcuration/constants/completion_criteria.py
@@ -13,7 +13,9 @@ def _build_validator():
     """
     cls = validator_for(completion_criteria.SCHEMA)
     validator = cls(completion_criteria.SCHEMA)
-    validator.resolver.store.update(RefResolver.from_schema(mastery_criteria.SCHEMA).store)
+    validator.resolver.store.update(
+        RefResolver.from_schema(mastery_criteria.SCHEMA).store
+    )
     return validator
@@ -86,10 +88,16 @@ def validate(data, kind=None):
         elif error.absolute_path:
             # if there's a path to a field, we can give a specific error
             json_path = ".".join(error.absolute_path)
-            error_descriptions.append(ValidationError("{} {}".format(json_path, error.message)))
+            error_descriptions.append(
+                ValidationError("{} {}".format(json_path, error.message))
+            )
         else:
             # without a path, likely top-level validation error, e.g.
`anyOf` conditions - error_descriptions.append(ValidationError("object doesn't satisfy '{}' conditions".format(error.validator))) + error_descriptions.append( + ValidationError( + "object doesn't satisfy '{}' conditions".format(error.validator) + ) + ) if error_descriptions: e = ValidationError("Completion criteria doesn't conform to schema") diff --git a/contentcuration/contentcuration/constants/feature_flags.py b/contentcuration/contentcuration/constants/feature_flags.py index 3011ad3385..ae04284f20 100644 --- a/contentcuration/contentcuration/constants/feature_flags.py +++ b/contentcuration/contentcuration/constants/feature_flags.py @@ -9,7 +9,9 @@ def _schema(): """ Loads JSON schema file """ - file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../static/feature_flags.json') + file = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "../static/feature_flags.json" + ) with open(file) as f: data = json.load(f) return data diff --git a/contentcuration/contentcuration/constants/feedback.py b/contentcuration/contentcuration/constants/feedback.py index 178c4a99ab..be92c2f99e 100644 --- a/contentcuration/contentcuration/constants/feedback.py +++ b/contentcuration/contentcuration/constants/feedback.py @@ -1,8 +1,8 @@ FEEDBACK_TYPE_CHOICES = ( - ('IMPORTED', 'Imported'), - ('REJECTED', 'Rejected'), - ('PREVIEWED', 'Previewed'), - ('SHOWMORE', 'Show More'), - ('IGNORED', 'Ignored'), - ('FLAGGED', 'Flagged'), + ("IMPORTED", "Imported"), + ("REJECTED", "Rejected"), + ("PREVIEWED", "Previewed"), + ("SHOWMORE", "Show More"), + ("IGNORED", "Ignored"), + ("FLAGGED", "Flagged"), ) diff --git a/contentcuration/contentcuration/db/advisory_lock.py b/contentcuration/contentcuration/db/advisory_lock.py index f1d71995ed..52400659c4 100644 --- a/contentcuration/contentcuration/db/advisory_lock.py +++ b/contentcuration/contentcuration/db/advisory_lock.py @@ -8,8 +8,8 @@ # signed limits are 2**32 or 2**64, so one less power of 2 # to become unsigned limits (half above 0, half below 0) -INT_32BIT = 2**31 -INT_64BIT = 2**63 +INT_32BIT = 2 ** 31 +INT_64BIT = 2 ** 63 class AdvisoryLockBusy(RuntimeError): @@ -64,7 +64,7 @@ def execute_lock(key1, key2=None, unlock=False, session=False, shared=False, wai xact_="" if session else "xact_", lock="unlock" if unlock else "lock", _shared="_shared" if shared else "", - keys=", ".join(["%s" for i in range(0, 2 if key2 is not None else 1)]) + keys=", ".join(["%s" for i in range(0, 2 if key2 is not None else 1)]), ) log_query = f"'{query}' with params {keys}" diff --git a/contentcuration/contentcuration/db/models/expressions.py b/contentcuration/contentcuration/db/models/expressions.py index 3daec06977..5ea7afbb14 100644 --- a/contentcuration/contentcuration/db/models/expressions.py +++ b/contentcuration/contentcuration/db/models/expressions.py @@ -15,8 +15,11 @@ class WhenQ(Q): Example: queryset.annotate(some_thing=Case(When(condition=QExpression(BoolExpr(...)), then=...))) """ + def resolve_expression(self, *args, **kwargs): - return WhereNode([child.resolve_expression(*args, **kwargs) for child in self.children]) + return WhereNode( + [child.resolve_expression(*args, **kwargs) for child in self.children] + ) class BooleanComparison(CombinedExpression): @@ -27,6 +30,7 @@ class BooleanComparison(CombinedExpression): Example: BooleanExpression(F('x'), '<=', Value(123)) """ + output_field = BooleanField() @@ -39,8 +43,9 @@ class IsNull(BooleanComparison): IsNull('my_field_name') -> my_field_name IS NULL IsNull('my_field_name', negate=True) -> 
my_field_name IS NOT NULL """ + def __init__(self, field_name, negate=False): - operator = 'IS NOT' if negate else 'IS' + operator = "IS NOT" if negate else "IS" super(IsNull, self).__init__(F(field_name), operator, Value(None)) @@ -55,7 +60,8 @@ class Array(Func): F("other_table__field") ) """ + function = "ARRAY" - template = '%(function)s[%(expressions)s]' - arg_joiner = ', ' + template = "%(function)s[%(expressions)s]" + arg_joiner = ", " arity = None diff --git a/contentcuration/contentcuration/db/models/functions.py b/contentcuration/contentcuration/db/models/functions.py index 0cea0b62c2..9c7e360266 100644 --- a/contentcuration/contentcuration/db/models/functions.py +++ b/contentcuration/contentcuration/db/models/functions.py @@ -19,6 +19,7 @@ class Unnest(Func): 2 | b ... """ + function = "UNNEST" arity = 1 @@ -31,6 +32,7 @@ class ArrayRemove(Func): ArrayRemove(Array(1, 2, 3, None), None) => Array[1, 2, 3] """ + function = "ARRAY_REMOVE" arity = 2 @@ -51,5 +53,6 @@ class JSONObjectKeys(Func): other_key ... """ + function = "JSONB_OBJECT_KEYS" arity = 1 diff --git a/contentcuration/contentcuration/db/models/manager.py b/contentcuration/contentcuration/db/models/manager.py index db1e3a77bf..5121a9e99f 100644 --- a/contentcuration/contentcuration/db/models/manager.py +++ b/contentcuration/contentcuration/db/models/manager.py @@ -67,13 +67,15 @@ def log_lock_time_spent(timespent): "suggested_duration", } -EDIT_ALLOWED_OVERRIDES = ALLOWED_OVERRIDES.union({ - "license_id", - "license_description", - "extra_fields", - "copyright_holder", - "author", -}) +EDIT_ALLOWED_OVERRIDES = ALLOWED_OVERRIDES.union( + { + "license_id", + "license_description", + "extra_fields", + "copyright_holder", + "author", + } +) class CustomContentNodeTreeManager(TreeManager.from_queryset(CustomTreeQuerySet)): @@ -114,14 +116,16 @@ def _attempt_lock(self, tree_ids, shared_tree_ids=None): # This will mean that every process acquires locks in the same order # and should help to minimize deadlocks for tree_id in tree_ids: - advisory_lock(TREE_LOCK, key2=tree_id, shared=tree_id in shared_tree_ids) + advisory_lock( + TREE_LOCK, key2=tree_id, shared=tree_id in shared_tree_ids + ) yield log_lock_time_spent(time.time() - start) @contextlib.contextmanager def lock_mptt(self, *tree_ids, **kwargs): tree_ids = sorted((t for t in set(tree_ids) if t is not None)) - shared_tree_ids = kwargs.pop('shared_tree_ids', []) + shared_tree_ids = kwargs.pop("shared_tree_ids", []) # If this is not inside the context of a delay context manager # or updates are not disabled set a lock on the tree_ids. 
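# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the Postgres primitive behind
# advisory_lock()/lock_mptt() above. Taking transaction-scoped locks on
# (TREE_LOCK, tree_id) pairs in sorted order is what serializes concurrent
# MPTT edits; the DSN and tree ids here are illustrative.
import psycopg2

TREE_LOCK = 1  # stand-in for the repo's lock-namespace constant

conn = psycopg2.connect("dbname=contentcuration")
with conn, conn.cursor() as cur:
    for tree_id in sorted({7, 42}):  # same order in every process -> fewer deadlocks
        # Blocks until (TREE_LOCK, tree_id) is free; released automatically at
        # COMMIT/ROLLBACK, mirroring pg_advisory_xact_lock in execute_lock().
        cur.execute("SELECT pg_advisory_xact_lock(%s, %s)", (TREE_LOCK, tree_id))
    # ... perform the tree mutations for these tree ids here ...
# ---------------------------------------------------------------------------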
if ( @@ -229,14 +233,17 @@ def move_node(self, node, target, position="last-child"): self._move_node(node, target, position=position) node.save(skip_lock=True) node_moved.send( - sender=node.__class__, instance=node, target=target, position=position, + sender=node.__class__, + instance=node, + target=target, + position=position, ) # when moving to a new tree, like trash, we'll blanket reset the modified for the # new root and the old root nodes if old_parent.tree_id != target.tree_id: for size_cache in [ ResourceSizeCache(target.get_root()), - ResourceSizeCache(old_parent.get_root()) + ResourceSizeCache(old_parent.get_root()), ]: size_cache.reset_modified(None) @@ -291,7 +298,9 @@ def _clone_node( copy.update(self.get_source_attributes(source)) if isinstance(mods, dict): - allowed_keys = EDIT_ALLOWED_OVERRIDES if can_edit_source_channel else ALLOWED_OVERRIDES + allowed_keys = ( + EDIT_ALLOWED_OVERRIDES if can_edit_source_channel else ALLOWED_OVERRIDES + ) for key, value in mods.items(): if key in copy and key in allowed_keys: copy[key] = value @@ -324,7 +333,12 @@ def _recurse_to_create_tree( mods, ): copy = self._clone_node( - source, parent_id, source_channel_id, can_edit_source_channel, pk, mods, + source, + parent_id, + source_channel_id, + can_edit_source_channel, + pk, + mods, ) if source.kind_id == content_kinds.TOPIC and source.id in nodes_by_parent: @@ -367,7 +381,7 @@ def copy_node( excluded_descendants=None, can_edit_source_channel=None, batch_size=None, - progress_tracker=None + progress_tracker=None, ): """ :type progress_tracker: contentcuration.utils.celery.ProgressTracker|None @@ -503,7 +517,9 @@ def _copy_tags(self, source_copy_id_map): # In the case that we are copying a node that is in the weird state of having a tag # that is duplicated (with a channel tag and a null channel tag) this can cause an error # so we ignore conflicts here to ignore the duplicate tags. 
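# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the behaviour ignore_conflicts=True
# provides in the bulk_create call below -- rows that would violate the
# through-table's unique constraint are skipped server-side instead of raising
# IntegrityError. NodeTag is a hypothetical stand-in for self.model.tags.through.
from myapp.models import NodeTag  # hypothetical through model

rows = [
    NodeTag(node_id="abc123", tag_id="t1"),
    NodeTag(node_id="abc123", tag_id="t1"),  # duplicate unique pair
]
# Without ignore_conflicts this would raise IntegrityError; with it, one row
# is inserted and the duplicate is silently dropped.
NodeTag.objects.bulk_create(rows, ignore_conflicts=True)
# ---------------------------------------------------------------------------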
- self.model.tags.through.objects.bulk_create(mappings_to_create, ignore_conflicts=True) + self.model.tags.through.objects.bulk_create( + mappings_to_create, ignore_conflicts=True + ) def _copy_assessment_items(self, source_copy_id_map): from contentcuration.models import File @@ -577,7 +593,12 @@ def _shallow_copy( can_edit_source_channel, ): data = self._clone_node( - node, None, source_channel_id, can_edit_source_channel, pk, mods, + node, + None, + source_channel_id, + can_edit_source_channel, + pk, + mods, ) with self.lock_mptt(target.tree_id if target else None): node_copy = self.model(**data) diff --git a/contentcuration/contentcuration/db/models/query.py b/contentcuration/contentcuration/db/models/query.py index 3cd57093dc..0c57d5a6cc 100644 --- a/contentcuration/contentcuration/db/models/query.py +++ b/contentcuration/contentcuration/db/models/query.py @@ -8,7 +8,7 @@ from mptt.querysets import TreeQuerySet -RIGHT_JOIN = 'RIGHT JOIN' +RIGHT_JOIN = "RIGHT JOIN" class CustomTreeQuerySet(TreeQuerySet, CTEQuerySet): @@ -19,11 +19,12 @@ class With(CTEWith): """ Custom CTE class which allows more join types than just INNER and LOUTER (LEFT) """ + def join(self, model_or_queryset, *filter_q, **filter_kw): """ Slight hack to allow more join types """ - join_type = filter_kw.get('_join_type', INNER) + join_type = filter_kw.get("_join_type", INNER) queryset = super(With, self).join(model_or_queryset, *filter_q, **filter_kw) # the underlying Django code forces the join type into INNER or a LEFT OUTER join @@ -40,6 +41,7 @@ class WithValues(With): @see https://www.postgresql.org/docs/9.6/queries-values.html """ + def __init__(self, fields, values_list, name="cte"): super(WithValues, self).__init__(None, name=name) self.query = WithValuesQuery(self) @@ -59,7 +61,9 @@ def _resolve_ref(self, name): class WithValuesSQLCompiler(SQLCompiler): - TEMPLATE = "SELECT * FROM (VALUES {values_statement}) AS {cte_name}({fields_statement})" + TEMPLATE = ( + "SELECT * FROM (VALUES {values_statement}) AS {cte_name}({fields_statement})" + ) def as_sql(self, with_limits=True, with_col_aliases=False): """ @@ -71,12 +75,16 @@ def as_sql(self, with_limits=True, with_col_aliases=False): :return: A tuple of SQL and parameters """ value_parameters = ", ".join(["%s"] * len(self.cte.fields)) - values_statement = ", ".join(["({})".format(value_parameters)] * len(self.cte.values_list)) - fields_statement = ", ".join([self.connection.ops.quote_name(field) for field in list(self.cte.fields)]) + values_statement = ", ".join( + ["({})".format(value_parameters)] * len(self.cte.values_list) + ) + fields_statement = ", ".join( + [self.connection.ops.quote_name(field) for field in list(self.cte.fields)] + ) sql = self.TEMPLATE.format( values_statement=values_statement, cte_name="_{}".format(self.cte.name), - fields_statement=fields_statement + fields_statement=fields_statement, ) return sql, list(sum(self.cte.values_list, ())) @@ -95,6 +103,7 @@ class WithValuesQuery(Query): Note: this does inherit from Query, which we're not passing a Model instance so not all Query functionality is intended to work """ + def __init__(self, cte): super(WithValuesQuery, self).__init__(None) self.cte = cte diff --git a/contentcuration/contentcuration/debug_panel_settings.py b/contentcuration/contentcuration/debug_panel_settings.py deleted file mode 100644 index c097acbbc6..0000000000 --- a/contentcuration/contentcuration/debug_panel_settings.py +++ /dev/null @@ -1,29 +0,0 @@ -from .dev_settings import * # noqa - -# These endpoints will throw an 
error on the django debug panel.
-EXCLUDED_DEBUG_URLS = [
-    "/content/storage",
-
-    # Disabling sync API because as soon as the sync API gets polled
-    # the current request data gets overwritten.
-    # Can be removed after websockets deployment.
-    "/api/sync",
-]
-
-DEBUG_PANEL_ACTIVE = True
-
-
-def custom_show_toolbar(request):
-    return not any(
-        request.path.startswith(url) for url in EXCLUDED_DEBUG_URLS
-    )  # noqa F405
-
-
-# if debug_panel exists, add it to our INSTALLED_APPS.
-INSTALLED_APPS += ("debug_panel", "debug_toolbar", "pympler")  # noqa F405
-MIDDLEWARE += (  # noqa F405
-    "debug_toolbar.middleware.DebugToolbarMiddleware",
-)
-DEBUG_TOOLBAR_CONFIG = {
-    "SHOW_TOOLBAR_CALLBACK": custom_show_toolbar,
-}
diff --git a/contentcuration/contentcuration/decorators.py b/contentcuration/contentcuration/decorators.py
index 9c51e83b7a..eb6fa4fea3 100644
--- a/contentcuration/contentcuration/decorators.py
+++ b/contentcuration/contentcuration/decorators.py
@@ -69,6 +69,7 @@ class DelayUserStorageCalculation(ContextDecorator):
     Decorator class that will dedupe and delay requests to enqueue storage calculation tasks
     for users until after the wrapped function has exited
     """
+
     depth = 0
     queue = []

@@ -85,6 +86,7 @@ def __enter__(self):

     def __exit__(self, exc_type, exc_val, exc_tb):
         from contentcuration.utils.user import calculate_user_storage
+
        self.depth -= 1
         if not self.is_active:
             user_ids = set(self.queue)
diff --git a/contentcuration/contentcuration/dev_settings.py b/contentcuration/contentcuration/dev_settings.py
index 439bdef8af..d81d23a993 100644
--- a/contentcuration/contentcuration/dev_settings.py
+++ b/contentcuration/contentcuration/dev_settings.py
@@ -5,4 +5,4 @@

 ROOT_URLCONF = "contentcuration.dev_urls"

-INSTALLED_APPS += ("drf_yasg", "automation")
+INSTALLED_APPS += ("drf_yasg",)
diff --git a/contentcuration/contentcuration/dev_urls.py b/contentcuration/contentcuration/dev_urls.py
index afbb7a83f8..77cbddfbac 100644
--- a/contentcuration/contentcuration/dev_urls.py
+++ b/contentcuration/contentcuration/dev_urls.py
@@ -8,6 +8,7 @@
 from django.urls import include
 from django.urls import path
 from django.urls import re_path
+from django.views.generic import TemplateView
 from drf_yasg import openapi
 from drf_yasg.views import get_schema_view
 from rest_framework import permissions
@@ -33,7 +34,7 @@ def file_server(request, storage_path=None):
         return HttpResponseNotFound()

     params = urllib.parse.urlparse(default_storage.url(storage_path)).query
-    host = request.META['HTTP_HOST'].split(":")[0]
+    host = request.META["HTTP_HOST"].split(":")[0]
     port = 9000  # hardcoded to the default minio IP address
     url = "http://{host}:{port}/{bucket}/{path}?{params}".format(
         host=host,
@@ -58,7 +59,7 @@ def file_server(request, storage_path=None):

 urlpatterns = urlpatterns + [
     re_path(r"^__open-in-editor/", webpack_redirect_view),
-    path('admin/', admin.site.urls),
+    path("admin/", admin.site.urls),
     re_path(
         r"^swagger(?P<format>\.json|\.yaml)$",
         schema_view.without_ui(cache_timeout=0),
@@ -76,8 +77,9 @@ def file_server(request, storage_path=None):
     re_path(r"^content/(?P<storage_path>.+)$", file_server),
 ]

-if getattr(settings, "DEBUG_PANEL_ACTIVE", False):
-
-    import debug_toolbar
-
-    urlpatterns = [re_path(r"^__debug__/", include(debug_toolbar.urls))] + urlpatterns
+urlpatterns += [
+    re_path(
+        r"^editor-dev(?:/.*)?$",
+        TemplateView.as_view(template_name="contentcuration/editor_dev.html"),
+    ),
+]
diff --git a/contentcuration/contentcuration/forms.py b/contentcuration/contentcuration/forms.py
index 8e9320d85f..2c761af080
100644 --- a/contentcuration/contentcuration/forms.py +++ b/contentcuration/contentcuration/forms.py @@ -13,14 +13,14 @@ from contentcuration.models import User -REGISTRATION_SALT = getattr(settings, 'REGISTRATION_SALT', 'registration') +REGISTRATION_SALT = getattr(settings, "REGISTRATION_SALT", "registration") # LOGIN/REGISTRATION FORMS ################################################################# class RegistrationForm(UserCreationForm): - CODE_ACCOUNT_ACTIVE = 'account_active' - CODE_ACCOUNT_INACTIVE = 'account_inactive' + CODE_ACCOUNT_ACTIVE = "account_active" + CODE_ACCOUNT_INACTIVE = "account_inactive" first_name = forms.CharField(required=True) last_name = forms.CharField(required=True) @@ -43,9 +43,13 @@ def clean_email(self): user_qs = User.objects.filter(email__iexact=email) if user_qs.exists(): if user_qs.filter(Q(is_active=True) | Q(deleted=True)).exists(): - raise ValidationError("Account already active", code=self.CODE_ACCOUNT_ACTIVE) + raise ValidationError( + "Account already active", code=self.CODE_ACCOUNT_ACTIVE + ) else: - raise ValidationError("Already registered.", code=self.CODE_ACCOUNT_INACTIVE) + raise ValidationError( + "Already registered.", code=self.CODE_ACCOUNT_INACTIVE + ) return email def save(self, commit=True): @@ -53,12 +57,12 @@ def save(self, commit=True): user.first_name = self.cleaned_data["first_name"] user.last_name = self.cleaned_data["last_name"] user.information = { - "uses": self.cleaned_data['uses'].split('|'), - "locations": self.cleaned_data['locations'].split('|'), - "space_needed": self.cleaned_data['storage'], - "heard_from": self.cleaned_data['source'], + "uses": self.cleaned_data["uses"].split("|"), + "locations": self.cleaned_data["locations"].split("|"), + "space_needed": self.cleaned_data["storage"], + "heard_from": self.cleaned_data["source"], } - user.policies = json.loads(self.cleaned_data['policies']) + user.policies = json.loads(self.cleaned_data["policies"]) if commit: user.save() @@ -67,7 +71,7 @@ def save(self, commit=True): class Meta: model = User - fields = ('first_name', 'last_name', 'email') + fields = ("first_name", "last_name", "email") class ForgotPasswordForm(PasswordResetForm): @@ -82,40 +86,57 @@ def save(self, request=None, extra_email_context=None, **kwargs): user = User.get_for_email(email) if user and user.is_active: - super(ForgotPasswordForm, self).save(request=request, extra_email_context=extra_email_context, **kwargs) + super(ForgotPasswordForm, self).save( + request=request, extra_email_context=extra_email_context, **kwargs + ) elif user: # For users who were invited but hadn't registered yet if not user.password: context = { - 'site': extra_email_context.get('site'), - 'user': user, - 'domain': extra_email_context.get('domain'), + "site": extra_email_context.get("site"), + "user": user, + "domain": extra_email_context.get("domain"), } - subject = render_to_string('registration/password_reset_subject.txt', context) - subject = ''.join(subject.splitlines()) - message = render_to_string('registration/registration_needed_email.txt', context) - user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, ) + subject = render_to_string( + "registration/password_reset_subject.txt", context + ) + subject = "".join(subject.splitlines()) + message = render_to_string( + "registration/registration_needed_email.txt", context + ) + user.email_user( + subject, + message, + settings.DEFAULT_FROM_EMAIL, + ) else: activation_key = self.get_activation_key(user) context = { - 'activation_key': activation_key, - 
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS, - 'site': extra_email_context.get('site'), - 'user': user, - 'domain': extra_email_context.get('domain'), + "activation_key": activation_key, + "expiration_days": settings.ACCOUNT_ACTIVATION_DAYS, + "site": extra_email_context.get("site"), + "user": user, + "domain": extra_email_context.get("domain"), } - subject = render_to_string('registration/password_reset_subject.txt', context) - subject = ''.join(subject.splitlines()) - message = render_to_string('registration/activation_needed_email.txt', context) - user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, ) + subject = render_to_string( + "registration/password_reset_subject.txt", context + ) + subject = "".join(subject.splitlines()) + message = render_to_string( + "registration/activation_needed_email.txt", context + ) + user.email_user( + subject, + message, + settings.DEFAULT_FROM_EMAIL, + ) def get_activation_key(self, user): """ Generate the activation key which will be emailed to the user. """ return signing.dumps( - obj=getattr(user, user.USERNAME_FIELD), - salt=REGISTRATION_SALT + obj=getattr(user, user.USERNAME_FIELD), salt=REGISTRATION_SALT ) @@ -124,11 +145,11 @@ class PolicyAcceptForm(forms.Form): class Meta: model = User - fields = ('accepted', 'policy_names') + fields = ("accepted", "policy_names") def save(self, user): user.policies = user.policies or {} - user.policies.update(json.loads(self.cleaned_data['policy'])) + user.policies.update(json.loads(self.cleaned_data["policy"])) user.save() return user @@ -141,8 +162,8 @@ class UsernameChangeForm(UserChangeForm): class Meta: model = User - fields = ('first_name', 'last_name') - exclude = ('password', 'email') + fields = ("first_name", "last_name") + exclude = ("password", "email") def clean_password(self): return True @@ -179,8 +200,23 @@ class StorageRequestForm(forms.Form): message = forms.CharField(required=True) class Meta: - fields = ("storage", "kind", "resource_count", "resource_size", "creators", "sample_link", "license", "public", - "audience", "import_count", "location", "uploading_for", "organization_type", "time_constraint", "message") + fields = ( + "storage", + "kind", + "resource_count", + "resource_size", + "creators", + "sample_link", + "license", + "public", + "audience", + "import_count", + "location", + "uploading_for", + "organization_type", + "time_constraint", + "message", + ) class IssueReportForm(forms.Form): @@ -201,7 +237,7 @@ def __init__(self, user, *args, **kwargs): super(DeleteAccountForm, self).__init__(*args, **kwargs) def clean_email(self): - email = self.cleaned_data['email'].strip().lower() - if self.user.is_admin or self.user.email.lower() != self.cleaned_data['email']: + email = self.cleaned_data["email"].strip().lower() + if self.user.is_admin or self.user.email.lower() != self.cleaned_data["email"]: raise ValidationError("Not allowed") return email diff --git a/contentcuration/contentcuration/frontend/RecommendedResourceCard/components/RecommendedResourceCard.vue b/contentcuration/contentcuration/frontend/RecommendedResourceCard/components/RecommendedResourceCard.vue deleted file mode 100644 index 4052c50a77..0000000000 --- a/contentcuration/contentcuration/frontend/RecommendedResourceCard/components/RecommendedResourceCard.vue +++ /dev/null @@ -1,79 +0,0 @@ - - - - - - - diff --git a/contentcuration/contentcuration/frontend/accounts/components/MessageLayout.vue b/contentcuration/contentcuration/frontend/accounts/components/MessageLayout.vue index 869ccf3a88..096f3a1205 
100644 --- a/contentcuration/contentcuration/frontend/accounts/components/MessageLayout.vue +++ b/contentcuration/contentcuration/frontend/accounts/components/MessageLayout.vue @@ -1,15 +1,19 @@ + - \ No newline at end of file + diff --git a/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ForgotPassword.vue b/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ForgotPassword.vue index 7349cf97ff..907f26d265 100644 --- a/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ForgotPassword.vue +++ b/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ForgotPassword.vue @@ -4,9 +4,21 @@ :header="$tr('forgotPasswordTitle')" :text="$tr('forgotPasswordPrompt')" > - - - + + + - \ No newline at end of file + diff --git a/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ResetPassword.vue b/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ResetPassword.vue index 65e69270c9..f7149673d5 100644 --- a/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ResetPassword.vue +++ b/contentcuration/contentcuration/frontend/accounts/pages/resetPassword/ResetPassword.vue @@ -4,8 +4,17 @@ :header="$tr('resetPasswordTitle')" :text="$tr('resetPasswordPrompt')" > - - + + + - diff --git a/contentcuration/contentcuration/frontend/administration/components/ConfirmationDialog.vue b/contentcuration/contentcuration/frontend/administration/components/ConfirmationDialog.vue index ca87915d68..ff5c050742 100644 --- a/contentcuration/contentcuration/frontend/administration/components/ConfirmationDialog.vue +++ b/contentcuration/contentcuration/frontend/administration/components/ConfirmationDialog.vue @@ -6,10 +6,19 @@ :text="text" > @@ -62,5 +71,4 @@ - + diff --git a/contentcuration/contentcuration/frontend/administration/components/__tests__/clipboardChip.spec.js b/contentcuration/contentcuration/frontend/administration/components/__tests__/clipboardChip.spec.js index a696e950c4..143c861d46 100644 --- a/contentcuration/contentcuration/frontend/administration/components/__tests__/clipboardChip.spec.js +++ b/contentcuration/contentcuration/frontend/administration/components/__tests__/clipboardChip.spec.js @@ -1,8 +1,10 @@ import { mount } from '@vue/test-utils'; import ClipboardChip from '../ClipboardChip.vue'; +import { factory } from '../../store'; function makeWrapper() { return mount(ClipboardChip, { + store: factory(), propsData: { value: 'testtoken', }, @@ -11,19 +13,20 @@ function makeWrapper() { describe('clipboardChip', () => { let wrapper; + beforeEach(() => { navigator.clipboard = { - writeText: jest.fn(), + writeText: jest.fn().mockImplementation(() => Promise.resolve()), }; wrapper = makeWrapper(); }); + afterEach(() => { delete navigator.clipboard; }); - it('should fire a copy operation on button click', () => { - const copyToClipboard = jest.fn(); - wrapper.setMethods({ copyToClipboard }); - wrapper.find('[data-test="copy"]').trigger('click'); - expect(copyToClipboard).toHaveBeenCalled(); + + it('should fire a copy operation on button click', async () => { + await wrapper.findComponent({ ref: 'copyButton' }).trigger('click'); + expect(navigator.clipboard.writeText).toHaveBeenCalled(); }); }); diff --git a/contentcuration/contentcuration/frontend/administration/mixins.js b/contentcuration/contentcuration/frontend/administration/mixins.js index c27e933ed5..22199e0bfb 100644 --- a/contentcuration/contentcuration/frontend/administration/mixins.js +++ 
b/contentcuration/contentcuration/frontend/administration/mixins.js @@ -51,7 +51,7 @@ export function generateFilterMixin(filterMap) { result[key] = this.$route.query[key]; return result; }, - {} + {}, ); // Set the router with the params from the filterMap and current route @@ -88,7 +88,7 @@ export function generateFilterMixin(filterMap) { result[key] = value; } }, - {} + {}, ); this.$router.push({ query }).catch(error => { if (error && error.name != 'NavigationDuplicated') { @@ -96,7 +96,7 @@ export function generateFilterMixin(filterMap) { } }); }, - clearSearch: function() { + clearSearch: function () { this.keywords = ''; }, updateKeywords() { @@ -140,7 +140,7 @@ export const tableMixin = { }, (value, key) => { return value !== null && key !== 'rowsPerPage' && key !== 'totalItems'; - } + }, ); this.$router diff --git a/contentcuration/contentcuration/frontend/administration/pages/AdministrationIndex.vue b/contentcuration/contentcuration/frontend/administration/pages/AdministrationIndex.vue index e6589334da..09f7ac3455 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/AdministrationIndex.vue +++ b/contentcuration/contentcuration/frontend/administration/pages/AdministrationIndex.vue @@ -2,7 +2,10 @@ - - - + @@ -225,5 +226,4 @@ - + diff --git a/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelDetails.vue b/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelDetails.vue index d2b166bf8b..470a807b9e 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelDetails.vue +++ b/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelDetails.vue @@ -2,23 +2,47 @@ - + - + This channel has been deleted @@ -31,8 +55,11 @@ @deleted="dialog = false" /> - -
+ - + @@ -50,13 +80,14 @@ + + diff --git a/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelTable.vue b/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelTable.vue index cc0a438725..1fb7d08010 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelTable.vue +++ b/contentcuration/contentcuration/frontend/administration/pages/Channels/ChannelTable.vue @@ -4,8 +4,16 @@

{{ `${$formatNumber(count)} ${count === 1 ? 'channel' : 'channels'}` }}

- - + + - + - + @@ -210,7 +234,11 @@ }, methods: { ...mapActions('channelAdmin', ['loadChannels', 'getAdminChannelListDetails']), - /* @public - used in generated filterMixin */ + /** + * @public + * @param params + * @return {*} + */ fetch(params) { return this.loadChannels(params); }, diff --git a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelActionsDropdown.spec.js b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelActionsDropdown.spec.js index cf63c1bb23..01113d2371 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelActionsDropdown.spec.js +++ b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelActionsDropdown.spec.js @@ -6,7 +6,6 @@ import ChannelActionsDropdown from '../ChannelActionsDropdown'; const store = factory(); const channelId = '11111111111111111111111111111111'; -const updateChannel = jest.fn().mockReturnValue(Promise.resolve()); const channel = { id: channelId, name: 'Channel Test', @@ -21,7 +20,24 @@ const channel = { }; function makeWrapper(channelProps = {}) { - return mount(ChannelActionsDropdown, { + const mocks = { + restore() { + for (const key of Object.keys(this)) { + if (key === 'restore') continue; + this[key].mockRestore(); + } + }, + }; + mocks.downloadPDF = jest.spyOn(ChannelActionsDropdown.methods, 'downloadPDF').mockResolvedValue(); + mocks.downloadCSV = jest.spyOn(ChannelActionsDropdown.methods, 'downloadCSV').mockResolvedValue(); + mocks.deleteChannel = jest + .spyOn(ChannelActionsDropdown.methods, 'deleteChannel') + .mockResolvedValue(); + mocks.updateChannel = jest + .spyOn(ChannelActionsDropdown.methods, 'updateChannel') + .mockResolvedValue(); + + const wrapper = mount(ChannelActionsDropdown, { router, store, propsData: { channelId }, @@ -33,83 +49,95 @@ function makeWrapper(channelProps = {}) { }; }, }, - methods: { updateChannel }, }); + + return [wrapper, mocks]; } describe('channelActionsDropdown', () => { - let wrapper; - beforeEach(() => { - updateChannel.mockClear(); + let wrapper, mocks; + + afterEach(() => { + if (mocks) { + mocks.restore(); + } }); describe('deleted channel actions', () => { beforeEach(() => { - wrapper = makeWrapper({ deleted: true }); + [wrapper, mocks] = makeWrapper({ deleted: true }); }); - it('restore channel should open restore confirmation', () => { - wrapper.find('[data-test="restore"]').trigger('click'); + + it('restore channel should open restore confirmation', async () => { + await wrapper.findComponent('[data-test="restore"]').trigger('click'); expect(wrapper.vm.restoreDialog).toBe(true); }); + it('confirm restore channel should call updateChannel with deleted = false', () => { - wrapper.find('[data-test="confirm-restore"]').vm.$emit('confirm'); - expect(updateChannel).toHaveBeenCalledWith({ id: channelId, deleted: false }); + wrapper.findComponent('[data-test="confirm-restore"]').vm.$emit('confirm'); + expect(mocks.updateChannel).toHaveBeenCalledWith({ id: channelId, deleted: false }); }); - it('delete channel should open delete confirmation', () => { - wrapper.find('[data-test="delete"]').trigger('click'); + + it('delete channel should open delete confirmation', async () => { + await wrapper.findComponent('[data-test="delete"]').trigger('click'); expect(wrapper.vm.deleteDialog).toBe(true); }); + it('confirm delete channel should call deleteChannel', () => { - const deleteChannel = jest.fn().mockReturnValue(Promise.resolve()); - 
wrapper.setMethods({ deleteChannel }); - wrapper.find('[data-test="confirm-delete"]').vm.$emit('confirm'); - expect(deleteChannel).toHaveBeenCalledWith(channelId); + wrapper.findComponent('[data-test="confirm-delete"]').vm.$emit('confirm'); + expect(mocks.deleteChannel).toHaveBeenCalledWith(channelId); }); }); + describe('live channel actions', () => { beforeEach(() => { - wrapper = makeWrapper({ public: false, deleted: false }); + [wrapper, mocks] = makeWrapper({ public: false, deleted: false }); }); - it('download PDF button should call downloadPDF', () => { - const downloadPDF = jest.fn(); - wrapper.setMethods({ downloadPDF }); - wrapper.find('[data-test="pdf"]').trigger('click'); - expect(downloadPDF).toHaveBeenCalled(); + + it('download PDF button should call downloadPDF', async () => { + await wrapper.findComponent('[data-test="pdf"]').trigger('click'); + expect(mocks.downloadPDF).toHaveBeenCalled(); }); - it('download CSV button should call downloadCSV', () => { - const downloadCSV = jest.fn(); - wrapper.setMethods({ downloadCSV }); - wrapper.find('[data-test="csv"]').trigger('click'); - expect(downloadCSV).toHaveBeenCalled(); + + it('download CSV button should call downloadCSV', async () => { + await wrapper.findComponent('[data-test="csv"]').trigger('click'); + expect(mocks.downloadCSV).toHaveBeenCalled(); }); - it('make public button should open make public confirmation', () => { - wrapper.find('[data-test="public"]').trigger('click'); + + it('make public button should open make public confirmation', async () => { + await wrapper.findComponent('[data-test="public"]').trigger('click'); expect(wrapper.vm.makePublicDialog).toBe(true); }); + it('confirm make public should call updateChannel with isPublic = true', () => { - wrapper.find('[data-test="confirm-public"]').vm.$emit('confirm'); - expect(updateChannel).toHaveBeenCalledWith({ id: channelId, isPublic: true }); + wrapper.findComponent('[data-test="confirm-public"]').vm.$emit('confirm'); + expect(mocks.updateChannel).toHaveBeenCalledWith({ id: channelId, isPublic: true }); }); - it('soft delete button should open soft delete confirmation', () => { - wrapper.find('[data-test="softdelete"]').trigger('click'); + + it('soft delete button should open soft delete confirmation', async () => { + await wrapper.findComponent('[data-test="softdelete"]').trigger('click'); expect(wrapper.vm.softDeleteDialog).toBe(true); }); + it('confirm soft delete button should call updateChannel with deleted = true', () => { - wrapper.find('[data-test="confirm-softdelete"]').vm.$emit('confirm'); - expect(updateChannel).toHaveBeenCalledWith({ id: channelId, deleted: true }); + wrapper.findComponent('[data-test="confirm-softdelete"]').vm.$emit('confirm'); + expect(mocks.updateChannel).toHaveBeenCalledWith({ id: channelId, deleted: true }); }); }); + describe('public channel actions', () => { beforeEach(() => { - wrapper = makeWrapper(); + [wrapper, mocks] = makeWrapper(); }); - it('make private button should open make private confirmation', () => { - wrapper.find('[data-test="private"]').trigger('click'); + + it('make private button should open make private confirmation', async () => { + await wrapper.findComponent('[data-test="private"]').trigger('click'); expect(wrapper.vm.makePrivateDialog).toBe(true); }); + it('confirm make private should call updateChannel with isPublic = false', () => { - wrapper.find('[data-test="confirm-private"]').vm.$emit('confirm'); - expect(updateChannel).toHaveBeenCalledWith({ id: channelId, isPublic: false }); + 
wrapper.findComponent('[data-test="confirm-private"]').vm.$emit('confirm'); + expect(mocks.updateChannel).toHaveBeenCalledWith({ id: channelId, isPublic: false }); }); }); }); diff --git a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelDetails.spec.js b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelDetails.spec.js index 08e2099eed..aea277c0af 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelDetails.spec.js +++ b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelDetails.spec.js @@ -32,44 +32,47 @@ function makeWrapper() { describe('channelDetails', () => { let wrapper; + beforeEach(() => { wrapper = makeWrapper(); }); + it('clicking close should close the modal', () => { wrapper.vm.dialog = false; expect(wrapper.vm.$route.name).toBe(RouteNames.CHANNELS); }); + describe('load', () => { - it('should automatically close if loadChannel does not find a channel', () => { - wrapper.setMethods({ - loadChannel: jest.fn().mockReturnValue(Promise.resolve()), - loadChannelDetails: jest.fn().mockReturnValue(Promise.resolve()), - }); - return wrapper.vm.load().then(() => { - expect(wrapper.vm.$route.name).toBe(RouteNames.CHANNELS); - }); + it('should automatically close if loadChannel does not find a channel', async () => { + const loadChannel = jest.spyOn(wrapper.vm, 'loadChannel'); + loadChannel.mockReturnValue(Promise.resolve()); + const loadChannelDetails = jest.spyOn(wrapper.vm, 'loadChannelDetails'); + loadChannelDetails.mockReturnValue(Promise.resolve()); + await wrapper.vm.load(); + expect(wrapper.vm.$route.name).toBe(RouteNames.CHANNELS); }); - it('load should call loadChannel and loadChannelDetails', () => { - const loadChannel = jest.fn().mockReturnValue(Promise.resolve({ id: channelId })); - const loadChannelDetails = jest.fn().mockReturnValue(Promise.resolve()); - wrapper.setMethods({ loadChannel, loadChannelDetails }); - return wrapper.vm.load().then(() => { - expect(loadChannel).toHaveBeenCalled(); - expect(loadChannelDetails).toHaveBeenCalled(); - }); + + it('load should call loadChannel and loadChannelDetails', async () => { + const loadChannel = jest.spyOn(wrapper.vm, 'loadChannel'); + loadChannel.mockReturnValue(Promise.resolve({ id: channelId })); + const loadChannelDetails = jest.spyOn(wrapper.vm, 'loadChannelDetails'); + loadChannelDetails.mockReturnValue(Promise.resolve()); + await wrapper.vm.load(); + expect(loadChannel).toHaveBeenCalled(); + expect(loadChannelDetails).toHaveBeenCalled(); }); }); - it('clicking info tab should navigate to info tab', () => { + + it('clicking info tab should navigate to info tab', async () => { wrapper.vm.tab = 'share'; - wrapper.find('[data-test="info-tab"] a').trigger('click'); - wrapper.vm.$nextTick(() => { - expect(wrapper.vm.tab).toBe('info'); - }); + await wrapper.findComponent('[data-test="info-tab"] a').trigger('click'); + await wrapper.vm.$nextTick(); + expect(wrapper.vm.tab).toBe('info'); }); - it('clicking share tab should navigate to share tab', () => { - wrapper.find('[data-test="share-tab"] a').trigger('click'); - wrapper.vm.$nextTick(() => { - expect(wrapper.vm.tab).toBe('share'); - }); + + it('clicking share tab should navigate to share tab', async () => { + await wrapper.find('[data-test="share-tab"] a').trigger('click'); + await wrapper.vm.$nextTick(); + expect(wrapper.vm.tab).toBe('share'); }); }); diff --git 
a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelItem.spec.js b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelItem.spec.js index acd7dc8fb5..9272eb90fd 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelItem.spec.js +++ b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelItem.spec.js @@ -42,36 +42,39 @@ function makeWrapper() { describe('channelItem', () => { let wrapper; + beforeEach(() => { wrapper = makeWrapper(); }); + it('selecting the channel should emit list with channel id', () => { wrapper.vm.selected = true; expect(wrapper.emitted('input')[0][0]).toEqual([channelId]); }); - it('deselecting the channel should emit list without channel id', () => { - wrapper.setProps({ value: [channelId] }); + + it('deselecting the channel should emit list without channel id', async () => { + await wrapper.setProps({ value: [channelId] }); wrapper.vm.selected = false; expect(wrapper.emitted('input')[0][0]).toEqual([]); }); - it('saveDemoServerUrl should call updateChannel with new demo_server_url', () => { - const updateChannel = jest.fn().mockReturnValue(Promise.resolve()); - wrapper.setMethods({ updateChannel }); - return wrapper.vm.saveDemoServerUrl().then(() => { - expect(updateChannel).toHaveBeenCalledWith({ - id: channelId, - demo_server_url: channel.demo_server_url, - }); + + it('saveDemoServerUrl should call updateChannel with new demo_server_url', async () => { + const updateChannel = jest.spyOn(wrapper.vm, 'updateChannel'); + updateChannel.mockReturnValue(Promise.resolve()); + await wrapper.vm.saveDemoServerUrl(); + expect(updateChannel).toHaveBeenCalledWith({ + id: channelId, + demo_server_url: channel.demo_server_url, }); }); - it('saveSourceUrl should call updateChannel with new source_url', () => { - const updateChannel = jest.fn().mockReturnValue(Promise.resolve()); - wrapper.setMethods({ updateChannel }); - return wrapper.vm.saveSourceUrl().then(() => { - expect(updateChannel).toHaveBeenCalledWith({ - id: channelId, - source_url: channel.source_url, - }); + + it('saveSourceUrl should call updateChannel with new source_url', async () => { + const updateChannel = jest.spyOn(wrapper.vm, 'updateChannel'); + updateChannel.mockReturnValue(Promise.resolve()); + await wrapper.vm.saveSourceUrl(); + expect(updateChannel).toHaveBeenCalledWith({ + id: channelId, + source_url: channel.source_url, }); }); }); diff --git a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelTable.spec.js b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelTable.spec.js index 1ced780018..c300285443 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelTable.spec.js +++ b/contentcuration/contentcuration/frontend/administration/pages/Channels/__tests__/channelTable.spec.js @@ -6,14 +6,19 @@ import ChannelTable from '../ChannelTable'; const store = factory(); -const loadChannels = jest.fn().mockReturnValue(Promise.resolve()); const channelList = ['test', 'channel', 'table']; +let loadItems; + function makeWrapper() { + loadItems = jest.spyOn(ChannelTable.mixins[0].methods, '_loadItems'); + loadItems.mockImplementation(() => Promise.resolve()); + router.replace({ name: RouteNames.CHANNELS }); + return mount(ChannelTable, { router, store, - sync: false, + attachTo: document.body, computed: { count() { return 10; @@ -22,9 +27,6 @@ 
function makeWrapper() { return channelList; }, }, - methods: { - loadChannels, - }, stubs: { ChannelItem: true, }, @@ -33,9 +35,13 @@ function makeWrapper() { describe('channelTable', () => { let wrapper; + beforeEach(() => { wrapper = makeWrapper(); }); + afterEach(() => { + loadItems.mockRestore(); + }); describe('filters', () => { it('changing filter should set query params', () => { wrapper.vm.filter = 'public'; @@ -55,18 +61,17 @@ describe('channelTable', () => { wrapper.vm.selectAll = true; expect(wrapper.vm.selected).toEqual(channelList); }); - it('removing selectAll should set selected to empty list', () => { + it('removing selectAll should set selected to empty list', async () => { wrapper.vm.selected = channelList; wrapper.vm.selectAll = false; - wrapper.vm.$nextTick(() => { - expect(wrapper.vm.selected).toEqual([]); - }); + await wrapper.vm.$nextTick(); + expect(wrapper.vm.selected).toEqual([]); }); it('selectedCount should match the selected length', () => { wrapper.vm.selected = ['test']; expect(wrapper.vm.selectedCount).toBe(1); }); - it('selected should clear on query changes', () => { + it('selected should clear on query changes', async () => { wrapper.vm.selected = ['test']; router.push({ ...wrapper.vm.$route, @@ -74,9 +79,8 @@ describe('channelTable', () => { param: 'test', }, }); - wrapper.vm.$nextTick(() => { - expect(wrapper.vm.selected).toEqual([]); - }); + await wrapper.vm.$nextTick(); + expect(wrapper.vm.selected).toEqual([]); }); }); describe('bulk actions', () => { @@ -84,30 +88,27 @@ describe('channelTable', () => { expect(wrapper.find('[data-test="csv"]').exists()).toBe(false); expect(wrapper.find('[data-test="pdf"]').exists()).toBe(false); }); - it('should be visible if items are selected', () => { + it('should be visible if items are selected', async () => { wrapper.vm.selected = channelList; - wrapper.vm.$nextTick(() => { - expect(wrapper.find('[data-test="csv"]').exists()).toBe(true); - expect(wrapper.find('[data-test="pdf"]').exists()).toBe(true); - }); + await wrapper.vm.$nextTick(); + expect(wrapper.find('[data-test="csv"]').exists()).toBe(true); + expect(wrapper.find('[data-test="pdf"]').exists()).toBe(true); }); - it('download PDF should call downloadPDF', () => { - const downloadPDF = jest.fn(); - wrapper.setMethods({ downloadPDF }); + it('download PDF should call downloadPDF', async () => { + const downloadPDF = jest.spyOn(wrapper.vm, 'downloadPDF'); + downloadPDF.mockImplementation(() => Promise.resolve()); wrapper.vm.selected = channelList; - wrapper.vm.$nextTick(() => { - wrapper.find('[data-test="pdf"] .v-btn').trigger('click'); - expect(downloadPDF).toHaveBeenCalled(); - }); + await wrapper.vm.$nextTick(); + wrapper.findComponent('[data-test="pdf"]').trigger('click'); + expect(downloadPDF).toHaveBeenCalled(); }); - it('download CSV should call downloadCSV', () => { - const downloadCSV = jest.fn(); - wrapper.setMethods({ downloadCSV }); + it('download CSV should call downloadCSV', async () => { + const downloadCSV = jest.spyOn(wrapper.vm, 'downloadCSV'); + downloadCSV.mockImplementation(() => Promise.resolve()); wrapper.vm.selected = channelList; - wrapper.vm.$nextTick(() => { - wrapper.find('[data-test="csv"] .v-btn').trigger('click'); - expect(downloadCSV).toHaveBeenCalled(); - }); + await wrapper.vm.$nextTick(); + wrapper.findComponent('[data-test="csv"]').trigger('click'); + expect(downloadCSV).toHaveBeenCalled(); }); }); }); diff --git a/contentcuration/contentcuration/frontend/administration/pages/Users/EmailUsersDialog.vue 
b/contentcuration/contentcuration/frontend/administration/pages/Users/EmailUsersDialog.vue index 433ede42c3..a77085c5d1 100644 --- a/contentcuration/contentcuration/frontend/administration/pages/Users/EmailUsersDialog.vue +++ b/contentcuration/contentcuration/frontend/administration/pages/Users/EmailUsersDialog.vue @@ -1,14 +1,28 @@ + + + diff --git a/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policiesModal.spec.js b/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policiesModal.spec.js index 41f29a1890..99ca5a3dd5 100644 --- a/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policiesModal.spec.js +++ b/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policiesModal.spec.js @@ -5,15 +5,20 @@ import { policies, policyDates } from 'shared/constants'; describe('PoliciesModal', () => { it('smoke test', () => { - const wrapper = mount(PoliciesModal); + const wrapper = mount(PoliciesModal, { + propsData: { + policy: policies.TERMS_OF_SERVICE, + }, + }); - expect(wrapper.isVueInstance()).toBe(true); + expect(wrapper.exists()).toBe(true); }); it('renders a policy title', () => { const wrapper = mount(PoliciesModal, { propsData: { title: 'Updated Terms Of Service', + policy: policies.TERMS_OF_SERVICE, }, }); @@ -38,6 +43,7 @@ describe('PoliciesModal', () => { wrapper = mount(PoliciesModal, { propsData: { needsAcceptance: false, + policy: policies.TERMS_OF_SERVICE, }, }); }); @@ -70,6 +76,7 @@ describe('PoliciesModal', () => { wrapper = mount(PoliciesModal, { propsData: { needsAcceptance: true, + policy: policies.TERMS_OF_SERVICE, }, }); }); @@ -91,7 +98,7 @@ describe('PoliciesModal', () => { describe('when accept policy checkbox is not checked', () => { it('disable continue button', () => { expect(wrapper.find('[data-test="continue-button"]').attributes().disabled).toEqual( - 'disabled' + 'disabled', ); }); }); diff --git a/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policyModals.spec.js b/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policyModals.spec.js index da1da3aad8..ba49916857 100644 --- a/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policyModals.spec.js +++ b/contentcuration/contentcuration/frontend/shared/views/policies/__tests__/policyModals.spec.js @@ -46,7 +46,7 @@ describe('policyModals', () => { store: { ...store, state: { ...store.state, loggedIn: false } }, }); - expect(wrapper.isVueInstance()).toBe(true); + expect(wrapper.exists()).toBe(true); expect(wrapper.is(PolicyModals)).toBe(true); }); diff --git a/contentcuration/contentcuration/frontend/shared/vuetify/theme.js b/contentcuration/contentcuration/frontend/shared/vuetify/theme.js index 47caf85a99..4bbc4036b6 100644 --- a/contentcuration/contentcuration/frontend/shared/vuetify/theme.js +++ b/contentcuration/contentcuration/frontend/shared/vuetify/theme.js @@ -6,12 +6,12 @@ export default function theme() { const tokens = themeTokens(); return Object.assign( { - loading: palette.black, - primaryBackground: brand.primary.v_200, - backgroundColor: palette.grey.v_50, - greyBackground: palette.grey.v_200, + loading: palette.grey.v_900, + primaryBackground: brand.primary.v_100, + backgroundColor: palette.grey.v_100, + greyBackground: palette.grey.v_300, greyBorder: palette.grey.v_400, - grey: palette.grey.v_600, + grey: palette.grey.v_700, darkGrey: palette.grey.v_800, greenSuccess: tokens.success, topic: palette.grey.v_400, @@ -23,12 +23,12 @@ export 
default function theme() { html5: tokens.explore, zim: tokens.explore, slideshow: tokens.read, - channelHighlightDefault: palette.grey.v_200, - draggableDropZone: palette.grey.v_100, - draggableDropOverlay: brand.primary.v_400, - greenHighlightBackground: brand.secondary.v_200, - roleVisibilityCoach: palette.lightblue.v_1100, + channelHighlightDefault: palette.grey.v_300, + draggableDropZone: palette.grey.v_200, + draggableDropOverlay: brand.primary.v_200, + greenHighlightBackground: brand.secondary.v_100, + roleVisibilityCoach: palette.lightblue.v_600, }, - tokens + tokens, ); } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/channel/__tests__/module.spec.js b/contentcuration/contentcuration/frontend/shared/vuex/channel/__tests__/module.spec.js index 182dd79fd6..d3bc874093 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/channel/__tests__/module.spec.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/channel/__tests__/module.spec.js @@ -66,13 +66,12 @@ describe('channel actions', () => { }); describe('loadChannel action', () => { - it('should call Channel.getCatalogChannel if user is not logged in', async done => { + it('should call Channel.getCatalogChannel if user is not logged in', async () => { store.state.session.currentUser.id = undefined; const getSpy = jest.spyOn(Channel, 'getCatalogChannel'); return store.dispatch('channel/loadChannel', id).then(() => { expect(getSpy).toHaveBeenCalledWith(id); getSpy.mockRestore(); - done(); }); }); it('should call Channel.get if user is logged in', () => { @@ -380,7 +379,7 @@ describe('Channel sharing vuex', () => { }); }); }); - it('should clear out old invitations', done => { + it('should clear out old invitations', async () => { const declinedInvitation = { id: 'choosy-invitation', email: 'choosy-collaborator@test.com', @@ -389,14 +388,9 @@ describe('Channel sharing vuex', () => { user: 'some-other-user', }; - Invitation.add(declinedInvitation).then(() => { - store.dispatch('channel/loadChannelUsers', channelId).then(() => { - expect(Object.keys(store.state.channel.invitationsMap)).not.toContain( - 'choosy-invitation' - ); - done(); - }); - }); + await Invitation.add(declinedInvitation); + await store.dispatch('channel/loadChannelUsers', channelId); + expect(Object.keys(store.state.channel.invitationsMap)).not.toContain('choosy-invitation'); }); }); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/channel/actions.js b/contentcuration/contentcuration/frontend/shared/vuex/channel/actions.js index dede896b49..63cb3e14c1 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/channel/actions.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/channel/actions.js @@ -80,13 +80,15 @@ export function commitChannel( thumbnail = NOVALUE, thumbnail_encoding = NOVALUE, thumbnail_url = NOVALUE, - } = {} + } = {}, ) { - if (context.state.channelsMap[id]) { - if (!id) { - throw ReferenceError('id must be defined to update a channel'); + const buildChannelData = () => { + const channelData = {}; + + if (id) { + channelData.id = id; } - const channelData = { id }; + if (name !== NOVALUE) { channelData.name = name; } @@ -111,17 +113,40 @@ export function commitChannel( channelData.thumbnail_url = thumbnail_url; } if (contentDefaults !== NOVALUE) { - const originalData = context.state.channelsMap[id].content_defaults; - // Pick out only content defaults that have been changed. 
- contentDefaults = pickBy(contentDefaults, (value, key) => value !== originalData[key]); - if (Object.keys(contentDefaults).length) { + if (id) { + const originalData = context.state.channelsMap[id].content_defaults; + contentDefaults = pickBy(contentDefaults, (value, key) => value !== originalData[key]); + if (Object.keys(contentDefaults).length) { + channelData.content_defaults = contentDefaults; + } + } else { channelData.content_defaults = contentDefaults; } } + + return channelData; + }; + + const channelData = buildChannelData(); + + if (context.state.channelsMap[id]) { + if (!id) { + throw new ReferenceError('id must be defined to update a channel'); + } return Channel.createModel(channelData).then(() => { context.commit('UPDATE_CHANNEL', { id, ...channelData }); context.commit('SET_CHANNEL_NOT_NEW', id); }); + } else { + return Channel.createModel(channelData).then(response => { + const createdChannel = response; + if (!createdChannel || !createdChannel.id) { + throw new Error('Created channel data is invalid. Missing id.'); + } + + context.commit('ADD_CHANNEL', createdChannel); + return createdChannel; + }); } } @@ -141,7 +166,7 @@ export function updateChannel( thumbnail = NOVALUE, thumbnail_encoding = NOVALUE, thumbnail_url = NOVALUE, - } = {} + } = {}, ) { if (context.state.channelsMap[id]) { const channelData = {}; @@ -252,7 +277,7 @@ export function loadChannelUsers(context, channelId) { context.commit('SET_USERS_TO_CHANNEL', { channelId, users: results[0] }); context.commit( 'ADD_INVITATIONS', - results[1].filter(i => !i.accepted && !i.declined && !i.revoked) + results[1].filter(i => !i.accepted && !i.declined && !i.revoked), ); }); } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/channel/getters.js b/contentcuration/contentcuration/frontend/shared/vuex/channel/getters.js index bcc8408809..e45cdd872a 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/channel/getters.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/channel/getters.js @@ -14,32 +14,32 @@ export function channels(state) { } export function getChannel(state) { - return function(channelId) { + return function (channelId) { return mapChannel(state, state.channelsMap[channelId]); }; } export function getChannels(state) { - return function(channelIds) { + return function (channelIds) { return channelIds.map(key => getChannel(state)(key)).filter(channel => channel); }; } export function getBookmarkedChannels(state) { - return function() { + return function () { return getChannels(state)(Object.keys(state.bookmarksMap)); }; } export function getChannelIsValid(state) { - return function(channelId) { + return function (channelId) { const channel = state.channelsMap[channelId]; return channel && channel.name && channel.name.length > 0; }; } export function getChannelUsers(state) { - return function(channelId, shareMode = SharingPermissions.VIEW_ONLY) { + return function (channelId, shareMode = SharingPermissions.VIEW_ONLY) { let channelUserIds; if (shareMode === SharingPermissions.EDIT) { channelUserIds = Object.keys(state.channelEditorsMap[channelId] || {}); @@ -51,43 +51,43 @@ export function getChannelUsers(state) { } export function getInvitation(state) { - return function(invitationId) { + return function (invitationId) { return state.invitationsMap[invitationId]; }; } export function getChannelInvitations(state) { - return function(channelId, shareMode = SharingPermissions.VIEW_ONLY) { + return function (channelId, shareMode = SharingPermissions.VIEW_ONLY) { return 
Object.values(state.invitationsMap).filter( invitation => invitation.channel === channelId && invitation.share_mode === shareMode && !invitation.accepted && !invitation.declined && - !invitation.revoked + !invitation.revoked, ); }; } export function checkUsers(state) { - return function(channelId, email) { + return function (channelId, email) { return Object.values(SharingPermissions).some(shareMode => getChannelUsers(state)(channelId, shareMode).some( - user => user && user.email.toLowerCase() === email.toLowerCase() - ) + user => user && user.email.toLowerCase() === email.toLowerCase(), + ), ); }; } export function checkInvitations(state) { - return function(channelId, email) { + return function (channelId, email) { return Object.values(state.invitationsMap).some( invitation => invitation.channel === channelId && invitation.email.toLowerCase() === email.toLowerCase() && !invitation.revoked && !invitation.declined && - !invitation.accepted + !invitation.accepted, ); }; } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/channel/mutations.js b/contentcuration/contentcuration/frontend/shared/vuex/channel/mutations.js index 2bef394c9a..62c7147af8 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/channel/mutations.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/channel/mutations.js @@ -1,4 +1,4 @@ -import Vue from 'vue'; +import Vue, { set } from 'vue'; import pick from 'lodash/pick'; import { ContentDefaults, NEW_OBJECT } from 'shared/constants'; import { mergeMapItem } from 'shared/vuex/utils'; @@ -10,10 +10,10 @@ export function ADD_CHANNEL(state, channel) { if (!channel.id) { throw ReferenceError('id must be defined to update a channel'); } - Vue.set( + set( state.channelsMap, channel.id, - Object.assign({}, state.channelsMap[channel.id] || {}, channel) + Object.assign({}, state.channelsMap[channel.id] || {}, channel), ); } @@ -39,14 +39,14 @@ export function UPDATE_CHANNEL(state, { id, content_defaults = {}, ...payload } } const channel = state.channelsMap[id]; if (channel) { - Vue.set(state.channelsMap, id, { + set(state.channelsMap, id, { ...channel, ...payload, // Assign all acceptable content defaults into the channel defaults content_defaults: Object.assign( {}, channel.content_defaults || {}, - pick(content_defaults, Object.keys(ContentDefaults)) + pick(content_defaults, Object.keys(ContentDefaults)), ), }); } @@ -54,12 +54,12 @@ export function UPDATE_CHANNEL(state, { id, content_defaults = {}, ...payload } export function UPDATE_CHANNEL_FROM_INDEXEDDB(state, { id, ...mods }) { if (id && state.channelsMap[id]) { - Vue.set(state.channelsMap, id, { ...applyMods(state.channelsMap[id], mods) }); + set(state.channelsMap, id, { ...applyMods(state.channelsMap[id], mods) }); } } export function SET_BOOKMARK(state, { channel }) { - Vue.set(state.bookmarksMap, channel, true); + set(state.bookmarksMap, channel, true); } export function DELETE_BOOKMARK(state, { channel }) { @@ -86,7 +86,7 @@ export function SET_USERS_TO_CHANNEL(state, { channelId, users = [] } = {}) { const canView = user.can_view; delete user.can_edit; delete user.can_view; - Vue.set(state.channelUsersMap, user.id, user); + set(state.channelUsersMap, user.id, user); if (canEdit) { ADD_EDITOR_TO_CHANNEL(state, { channel: channelId, user: user.id }); } else if (canView) { @@ -97,9 +97,9 @@ export function SET_USERS_TO_CHANNEL(state, { channelId, users = [] } = {}) { export function ADD_VIEWER_TO_CHANNEL(state, { channel, user } = {}) { if (!state.channelViewersMap[channel]) { - 
Vue.set(state.channelViewersMap, channel, {}); + set(state.channelViewersMap, channel, {}); } - Vue.set(state.channelViewersMap[channel], user, true); + set(state.channelViewersMap[channel], user, true); } export function REMOVE_VIEWER_FROM_CHANNEL(state, { channel, user } = {}) { @@ -110,9 +110,9 @@ export function REMOVE_VIEWER_FROM_CHANNEL(state, { channel, user } = {}) { export function ADD_EDITOR_TO_CHANNEL(state, { channel, user } = {}) { if (!state.channelEditorsMap[channel]) { - Vue.set(state.channelEditorsMap, channel, {}); + set(state.channelEditorsMap, channel, {}); } - Vue.set(state.channelEditorsMap[channel], user, true); + set(state.channelEditorsMap[channel], user, true); } export function REMOVE_EDITOR_FROM_CHANNEL(state, { channel, user } = {}) { diff --git a/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/connectionModule.js b/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/connectionModule.js index 0ceb330c34..c982c1b9b7 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/connectionModule.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/connectionModule.js @@ -56,7 +56,7 @@ export default { setTimeout(() => pollingClient.get(stealth), 1000 * delaySeconds(attempt)); } return Promise.reject(error); - } + }, ); pollingClient.get(stealth); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/index.js b/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/index.js index 7a098fe9d4..54972f3d66 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/index.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/connectionPlugin/index.js @@ -18,7 +18,7 @@ const ConnectionPlugin = store => { store.dispatch('handleDisconnection'); } return Promise.reject(error); - } + }, ); client.interceptors.response.handlers.reverse(); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/index.js b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/index.js index bc4600f010..2c4d795463 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/index.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/index.js @@ -68,12 +68,12 @@ function DraggablePlugin(store) { let clientX, clientY; let addedDragOverListener = false; - const dragOverEventListener = function(e) { + const dragOverEventListener = function (e) { clientX = e.clientX; clientY = e.clientY; }; - const cancelEventListener = function(e) { + const cancelEventListener = function (e) { if ('code' in e) { if (e.code !== 'Escape') { return; diff --git a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/actions.js b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/actions.js index de09ded7ab..c0944f5140 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/actions.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/actions.js @@ -132,9 +132,8 @@ export function setDraggableDropped(context, identity) { // We can add grouped handles to this sources array const sources = [source].map(cloneDeep); - const { hoverDraggableSection, hoverDraggableTarget } = context.rootState.draggable[ - `${identity.type}s` - ]; + const { hoverDraggableSection, hoverDraggableTarget } = + context.rootState.draggable[`${identity.type}s`]; const target = { identity: 
cloneDeep(identity), section: hoverDraggableSection, diff --git a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/getters.js b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/getters.js index 939e6077b6..515bda3b19 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/getters.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/getters.js @@ -21,7 +21,7 @@ export function isHoverDraggableAncestor(state, getters, rootState, rootGetters) * @param {Object} identity * @return {Boolean} */ - return function(identity) { + return function (identity) { const { type } = getters.deepestHoverDraggable || {}; return type ? rootGetters[`draggable/${type}s/isHoverDraggableAncestor`](identity) : false; }; @@ -57,7 +57,7 @@ export function activeDraggableSize(state, getters, rootState) { } export function isGroupedDraggableHandle(state) { - return function(identity) { + return function (identity) { if (identity.type === DraggableTypes.HANDLE) { const { key } = new DraggableIdentityHelper(identity); return key in state.groupedDraggableHandles; @@ -81,7 +81,7 @@ export function getDraggableDropData(state) { * relative: Number * }}|undefined} */ - return function(identity) { + return function (identity) { // Ancestors will map to the string of the actual data, instead of duplicating, // as prepared in code below const destination = new DraggableIdentityHelper(identity); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/mutations.js b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/mutations.js index df7d977276..3feb287d17 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/mutations.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/mutations.js @@ -1,4 +1,4 @@ -import Vue from 'vue'; +import Vue, { set } from 'vue'; import { DraggableFlags } from './constants'; import { DraggableIdentityHelper } from 'shared/vuex/draggablePlugin/module/utils'; @@ -20,7 +20,7 @@ export function RESET_ACTIVE_DRAGGABLE_UNIVERSE(state) { export function ADD_GROUPED_HANDLE(state, identity) { const { key } = new DraggableIdentityHelper(identity); - Vue.set(state.groupedDraggableHandles, key, identity); + set(state.groupedDraggableHandles, key, identity); } export function REMOVE_GROUPED_HANDLE(state, identity) { @@ -43,7 +43,7 @@ export function RESET_DRAGGABLE_DIRECTION(state) { export function ADD_DRAGGABLE_CONTAINER_DROPS(state, data) { for (const key in data) { - Vue.set(state.draggableContainerDrops, key, data[key]); + set(state.draggableContainerDrops, key, data[key]); } } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/getters.js b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/getters.js index 261cd87ed1..0eaaef5c97 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/getters.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/getters.js @@ -20,7 +20,7 @@ export function isHoverDraggableAncestor(state, getters) { * @param {Object} identity * @return {Boolean} */ - return function({ id, type }) { + return function ({ id, type }) { return Boolean(getters.getHoverAncestor({ id, type })); }; } @@ -29,7 +29,7 @@ export function getHoverAncestor(state) { /** * @param {Object} match - An object 
with which it will test for match with ancestor */ - return function(match) { + return function (match) { return new DraggableIdentityHelper(state.hoverDraggable).findClosestAncestor(match); }; } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/mutations.js b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/mutations.js index 346828bb77..3673050b80 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/mutations.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/draggablePlugin/module/submodule/mutations.js @@ -1,4 +1,4 @@ -import Vue from 'vue'; +import { set } from 'vue'; import { DraggableFlags, DraggableIdentity } from '../constants'; /** @@ -12,7 +12,7 @@ function setIdentity(state, name, obj = null) { } Object.keys(obj).forEach(key => { - Vue.set(state[name], key, obj[key]); + set(state[name], key, obj[key]); }); } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/clean.spec.js b/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/clean.spec.js new file mode 100644 index 0000000000..05513c9ed4 --- /dev/null +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/clean.spec.js @@ -0,0 +1,124 @@ +import JSZip from 'jszip'; +import { cleanFile } from '../clean'; + +describe('cleanFile', () => { + let mockZip; + let originalCreateElement; + + beforeEach(() => { + mockZip = new JSZip(); + // Store original createElement + originalCreateElement = global.document.createElement; + }); + + afterEach(() => { + // Restore original methods + global.document.createElement = originalCreateElement; + jest.restoreAllMocks(); + }); + + // Helper function to create a zip file with given contents + async function createTestZip(files, options = {}) { + for (const [path, content] of Object.entries(files)) { + mockZip.file(path, content); + } + const zipContent = await mockZip.generateAsync({ type: 'blob' }); + return new File([zipContent], options.filename || 'test.zip', { + type: 'application/zip', + lastModified: options.lastModified || Date.now(), + }); + } + + describe('HTML5 zip cleaning', () => { + it('should remove unnecessary nesting from zip files', async () => { + const originalFiles = { + 'dist/index.html': '', + 'dist/css/style.css': 'body {}', + 'dist/js/main.js': "console.log('hello')", + }; + + const file = await createTestZip(originalFiles, { filename: 'test.zip' }); + const cleanedFile = await cleanFile(file); + + // Verify cleaned content + const cleanedZip = await JSZip.loadAsync(cleanedFile); + const cleanedPaths = Object.keys(cleanedZip.files); + + expect(cleanedPaths).toContain('index.html'); + expect(cleanedPaths).toContain('css/style.css'); + expect(cleanedPaths).toContain('js/main.js'); + expect(cleanedPaths).not.toContain('dist/index.html'); + }); + + it('should preserve file structure when no unnecessary nesting exists', async () => { + const originalFiles = { + 'index.html': '', + 'css/style.css': 'body {}', + 'js/main.js': "console.log('hello')", + }; + + const file = await createTestZip(originalFiles); + const cleanedFile = await cleanFile(file); + + // Verify cleaned content + const cleanedZip = await JSZip.loadAsync(cleanedFile); + const cleanedPaths = Object.keys(cleanedZip.files); + + expect(cleanedPaths).toHaveLength(Object.keys(originalFiles).length); + expect(cleanedPaths).toContain('index.html'); + expect(cleanedPaths).toContain('css/style.css'); + 
expect(cleanedPaths).toContain('js/main.js'); + }); + + it('should handle deeply nested structures', async () => { + const originalFiles = { + 'project/src/dist/build/index.html': '', + 'project/src/dist/build/assets/style.css': 'body {}', + }; + + const file = await createTestZip(originalFiles); + const cleanedFile = await cleanFile(file); + + const cleanedZip = await JSZip.loadAsync(cleanedFile); + const cleanedPaths = Object.keys(cleanedZip.files); + + expect(cleanedPaths).toContain('index.html'); + expect(cleanedPaths).toContain('assets/style.css'); + }); + }); + + describe('Error handling', () => { + it('should throw error for corrupt zip files', async () => { + const corruptFile = new File(['not a zip file'], 'test.zip', { type: 'application/zip' }); + await expect(async () => await cleanFile(corruptFile)).rejects.toThrow(); + }); + + it('should handle missing files in zip gracefully', async () => { + const emptyZip = await mockZip.generateAsync({ type: 'blob' }); + const file = new File([emptyZip], 'test.zip', { type: 'application/zip' }); + + const cleanedFile = await cleanFile(file); + const cleanedZip = await JSZip.loadAsync(cleanedFile); + + expect(Object.keys(cleanedZip.files)).toHaveLength(0); + }); + }); + + describe('Non-HTML5 files', () => { + it('should pass through non-zip files unchanged', async () => { + const imageFile = new File(['fake image data'], 'test.jpg', { type: 'image/jpeg' }); + const result = await cleanFile(imageFile); + + expect(result).toBe(imageFile); + }); + + it('should pass through unsupported formats unchanged', async () => { + const unknownFile = new File(['unknown data'], 'test.xyz', { + type: 'application/octet-stream', + }); + const result = await cleanFile(unknownFile); + + expect(result).toBe(unknownFile); + }); + }); +}); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/module.spec.js b/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/module.spec.js index 3f38ceabd8..a265c9ba8d 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/module.spec.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/module.spec.js @@ -148,9 +148,9 @@ describe('file store', () => { describe('H5P content file extract metadata', () => { it('getH5PMetadata should check for h5p.json file', () => { const zip = new JSZip(); - return zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + return zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).rejects.toThrow( - 'h5p.json not found in the H5P file.' 
+ 'h5p.json not found in the H5P file.', ); }); }); @@ -158,7 +158,7 @@ describe('file store', () => { const manifestFile = get_metadata_file({ title: 'Test file' }); const zip = new JSZip(); zip.file('h5p.json', manifestFile); - await zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + await zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).resolves.toEqual({ title: 'Test file', }); @@ -168,7 +168,7 @@ describe('file store', () => { const manifestFile = get_metadata_file({ title: 'Test file', language: 'und' }); const zip = new JSZip(); zip.file('h5p.json', manifestFile); - await zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + await zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).resolves.toEqual({ title: 'Test file', }); @@ -186,7 +186,7 @@ describe('file store', () => { const manifestFile = get_metadata_file({ title: 'Test file', license: licenseName }); const zip = new JSZip(); zip.file('h5p.json', manifestFile); - await zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + await zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).resolves.toEqual({ title: 'Test file', license: licenseId, @@ -202,7 +202,7 @@ describe('file store', () => { const manifestFile = get_metadata_file({ title: 'Test file', authors: [authorObj] }); const zip = new JSZip(); zip.file('h5p.json', manifestFile); - await zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + await zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).resolves.toEqual({ title: 'Test file', [field]: authorObj.name, @@ -216,7 +216,7 @@ describe('file store', () => { }); const zip = new JSZip(); zip.file('h5p.json', manifestFile); - await zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + await zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).resolves.toEqual({ title: 'Test file', }); @@ -229,7 +229,7 @@ describe('file store', () => { }); const zip = new JSZip(); zip.file('h5p.json', manifestFile); - await zip.generateAsync({ type: 'blob' }).then(async function(h5pBlob) { + await zip.generateAsync({ type: 'blob' }).then(async function (h5pBlob) { await expect(getH5PMetadata(h5pBlob)).resolves.toEqual({ title: 'Test file', language: 'en', diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/validation.spec.js b/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/validation.spec.js new file mode 100644 index 0000000000..72f8c9e948 --- /dev/null +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/__tests__/validation.spec.js @@ -0,0 +1,249 @@ +import JSZip from 'jszip'; +import { IMAGE_PRESETS, VIDEO_PRESETS } from '../utils'; +import { + validateFile, + VALID, + INVALID_UNREADABLE_FILE, + INVALID_UNSUPPORTED_FORMAT, + INVALID_HTML5_ZIP, +} from '../validation'; +import FormatPresets from 'shared/leUtils/FormatPresets'; + +describe('validateFile', () => { + let mockObjectUrl; + let originalCreateElement; + + beforeEach(() => { + mockObjectUrl = 'blob:mock-url'; + + // Mock URL methods + global.URL.createObjectURL = jest.fn(() => mockObjectUrl); + global.URL.revokeObjectURL = jest.fn(); + + // Store original createElement + originalCreateElement = global.document.createElement; + + // Mock createElement for media elements + 
global.document.createElement = function (tagName) { + if (['audio', 'video', 'img'].includes(tagName)) { + return { + set src(url) { + // Small delay to simulate async loading + setTimeout(() => { + if (tagName === 'img') { + this.onload?.(); + } else { + this.onloadedmetadata?.(); + } + }, 0); + }, + }; + } + return originalCreateElement.call(document, tagName); + }; + }); + + afterEach(() => { + // Restore original methods + global.document.createElement = originalCreateElement; + jest.restoreAllMocks(); + }); + + // Helper function to create a mock file + const createMockFile = (name, type = '') => new File([], name, { type }); + + // Helper to create failing media element + const createFailingElement = () => ({ + set src(url) { + setTimeout(() => this.onerror?.(new Error('Failed to load')), 0); + }, + }); + + describe('Format validation', () => { + it('should reject unsupported file formats', async () => { + const file = createMockFile('test.unknown'); + const result = await validateFile(file); + expect(result).toBe(INVALID_UNSUPPORTED_FORMAT); + }); + + it('should accept supported non-media formats without validation', async () => { + const file = createMockFile('test.pdf'); // document preset + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + }); + + describe('Audio validation', () => { + it('should validate MP3 files correctly', async () => { + const file = createMockFile('test.mp3', 'audio/mpeg'); + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should handle audio load errors', async () => { + global.document.createElement = function (tagName) { + if (tagName === 'audio') { + return createFailingElement('audio'); + } + return originalCreateElement.call(document, tagName); + }; + + const file = createMockFile('test.mp3', 'audio/mpeg'); + const result = await validateFile(file); + expect(result).toBe(INVALID_UNREADABLE_FILE); + }); + }); + + describe('Video validation', () => { + const videoFormats = []; + + for (const preset of VIDEO_PRESETS) { + const presetObject = FormatPresets.get(preset); + for (const ext of presetObject.allowed_formats) { + videoFormats.push({ preset, ext, type: `video/${ext}` }); + } + } + + test.each(videoFormats)('should validate %j files correctly', async ({ ext, type }) => { + const file = createMockFile(`test.${ext}`, type); + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should handle video load errors', async () => { + global.document.createElement = function (tagName) { + if (tagName === 'video') { + return createFailingElement('video'); + } + return originalCreateElement.call(document, tagName); + }; + + const file = createMockFile('test.mp4', 'video/mp4'); + const result = await validateFile(file); + expect(result).toBe(INVALID_UNREADABLE_FILE); + }); + }); + + describe('Image validation', () => { + const imagePresets = []; + + for (const preset of IMAGE_PRESETS) { + const presetObject = FormatPresets.get(preset); + if (presetObject.display) { + for (const ext of presetObject.allowed_formats) { + imagePresets.push({ preset, ext, type: ext === 'jpg' ? 
'image/jpeg' : `image/${ext}` }); + } + } + } + + test.each(imagePresets)('should validate %j files correctly', async ({ ext, type }) => { + const file = createMockFile(`test.${ext}`, type); + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should handle image load errors', async () => { + global.document.createElement = function (tagName) { + if (tagName === 'img') { + return createFailingElement('img'); + } + return originalCreateElement.call(document, tagName); + }; + + const file = createMockFile('test.png', 'image/png'); + const result = await validateFile(file); + expect(result).toBe(INVALID_UNREADABLE_FILE); + }); + }); + + describe('HTML5 zip validation', () => { + let mockZip; + + beforeEach(() => { + mockZip = new JSZip(); + }); + + it('should validate zip with root index.html', async () => { + mockZip.file('index.html', ''); + mockZip.file('assets/style.css', 'body {}'); + const zipContent = await mockZip.generateAsync({ type: 'blob' }); + const file = new File([zipContent], 'test.zip', { type: 'application/zip' }); + + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should validate zip with nested index.html in common root', async () => { + mockZip.file('dist/index.html', ''); + mockZip.file('dist/assets/style.css', 'body {}'); + const zipContent = await mockZip.generateAsync({ type: 'blob' }); + const file = new File([zipContent], 'test.zip', { type: 'application/zip' }); + + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should validate zip with alternative HTML file when no index.html exists', async () => { + mockZip.file('main.html', ''); + mockZip.file('assets/style.css', 'body {}'); + const zipContent = await mockZip.generateAsync({ type: 'blob' }); + const file = new File([zipContent], 'test.zip', { type: 'application/zip' }); + + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should validate zip with deeply nested alternative HTML file', async () => { + mockZip.file('project/dist/src/main.html', ''); + mockZip.file('project/dist/assets/style.css', 'body {}'); + const zipContent = await mockZip.generateAsync({ type: 'blob' }); + const file = new File([zipContent], 'test.zip', { type: 'application/zip' }); + + const result = await validateFile(file); + expect(result).toBe(VALID); + }); + + it('should reject zip without any HTML files', async () => { + mockZip.file('styles.css', 'body {}'); + mockZip.file('script.js', "console.log('test');"); + const zipContent = await mockZip.generateAsync({ type: 'blob' }); + const file = new File([zipContent], 'test.zip', { type: 'application/zip' }); + + const result = await validateFile(file); + expect(result).toBe(INVALID_HTML5_ZIP); + }); + + it('should reject corrupted zip files', async () => { + const file = new File(['not a zip file'], 'test.zip', { type: 'application/zip' }); + const result = await validateFile(file); + expect(result).toBe(INVALID_UNREADABLE_FILE); + }); + }); + + describe('Resource cleanup', () => { + it('should clean up object URLs after successful validation', async () => { + const file = createMockFile('test.mp3', 'audio/mpeg'); + await validateFile(file); + + expect(URL.createObjectURL).toHaveBeenCalledTimes(1); + expect(URL.revokeObjectURL).toHaveBeenCalledTimes(1); + expect(URL.revokeObjectURL).toHaveBeenCalledWith(mockObjectUrl); + }); + + it('should clean up object URLs after failed validation', async () => { + global.document.createElement = function 
(tagName) { + if (tagName === 'audio') { + return createFailingElement('audio'); + } + return originalCreateElement.call(document, tagName); + }; + + const file = createMockFile('test.mp3', 'audio/mpeg'); + await validateFile(file); + + expect(URL.createObjectURL).toHaveBeenCalledTimes(1); + expect(URL.revokeObjectURL).toHaveBeenCalledTimes(1); + expect(URL.revokeObjectURL).toHaveBeenCalledWith(mockObjectUrl); + }); + }); +}); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/actions.js b/contentcuration/contentcuration/frontend/shared/vuex/file/actions.js index 778ebec9fa..ea1c38008c 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/file/actions.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/actions.js @@ -1,3 +1,4 @@ +import { cleanFile } from './clean'; import { getHash, extractMetadata, storageUrl } from './utils'; import { File } from 'shared/data/resources'; import client from 'shared/client'; @@ -117,14 +118,14 @@ function hexToBase64(str) { .replace(/\r|\n/g, '') .replace(/([\da-fA-F]{2}) ?/g, '0x$1 ') .replace(/ +$/, '') - .split(' ') - ) + .split(' '), + ), ); } export function uploadFileToStorage( context, - { id, file_format, mightSkip, checksum, file, url, contentType } + { id, file_format, mightSkip, checksum, file, url, contentType }, ) { return (mightSkip ? client.head(storageUrl(checksum, file_format)) : Promise.reject()) .then(() => { @@ -167,11 +168,9 @@ export function uploadFileToStorage( /** * @return {Promise<{uploadPromise: Promise, fileObject: Object}>} */ -export function uploadFile(context, { file, preset = null } = {}) { - const file_format = file.name - .split('.') - .pop() - .toLowerCase(); +export async function uploadFile(context, { file, preset = null } = {}) { + const file_format = file.name.split('.').pop().toLowerCase(); + file = await cleanFile(file, preset); const hashPromise = getHash(file).catch(() => Promise.reject(fileErrors.CHECKSUM_HASH_FAILED)); let checksum, metadata = {}; @@ -279,3 +278,13 @@ export function getAudioData(context, url) { .catch(reject); }); } + +export function downloadFile(context, { url, fileName }) { + const anchor = document.createElement('a'); + anchor.download = fileName; + anchor.href = url; + anchor.style.display = 'none'; + document.body.appendChild(anchor); + anchor.click(); + anchor.remove(); +} diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/clean.js b/contentcuration/contentcuration/frontend/shared/vuex/file/clean.js new file mode 100644 index 0000000000..a774cbdda7 --- /dev/null +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/clean.js @@ -0,0 +1,74 @@ +import JSZip from 'jszip'; +import { inferPreset } from './utils'; +import { createPredictableZip, findCommonRoot } from 'shared/utils/zipFile'; +import { FormatPresetsNames } from 'shared/leUtils/FormatPresets'; + +/** + * Creates a new files object with common root directory removed + * @param {Object} files - JSZip files object + * @param {string} commonRoot - Common root path to remove + * @returns {Object} New files object with paths remapped + */ +async function remapFiles(files, commonRoot) { + const cleanedFiles = {}; + commonRoot = commonRoot === '' ? 
commonRoot : commonRoot + '/'; + const commonRootLength = commonRoot.length; + + for (const [path, file] of Object.entries(files)) { + if (!file.dir) { + // Skip directory entries + const newPath = path.slice(commonRootLength); + cleanedFiles[newPath] = await file.async('uint8array'); + } + } + + return cleanedFiles; +} + +/** + * Cleans an HTML5 zip file by removing unnecessary directory nesting + * @param {File} file - The HTML5 zip file to clean + * @returns {Promise} - A promise that resolves to the cleaned file + */ +export async function cleanHTML5Zip(file) { + // Load and process the zip file + const zip = new JSZip(); + const zipContent = await zip.loadAsync(file); + + // Find and remove common root directory + const commonRoot = findCommonRoot(zipContent.files); + const cleanedFiles = await remapFiles(zipContent.files, commonRoot); + + // Create new predictable zip with cleaned files + const cleanedZipBuffer = await createPredictableZip(cleanedFiles); + + // Create new File object with original metadata + const cleanedFile = new File([cleanedZipBuffer], file.name, { + type: file.type, + lastModified: file.lastModified, + }); + + return cleanedFile; +} + +/** + * Cleans a file based on its format. Currently only supports HTML5 zip files. + * Other files are passed through unchanged. + * @param {File} file - The file to clean + * @param {string} preset - The preset type of the file + * @returns {Promise} - A promise that resolves to the cleaned file + */ +export async function cleanFile(file, preset = null) { + preset = inferPreset(file, preset); + if (!preset) { + return file; // Pass through files with unknown preset + } + + // Clean file based on preset type + if (preset === FormatPresetsNames.HTML5_ZIP) { + return await cleanHTML5Zip(file); + } + + // Pass through files with other presets unchanged + return file; +} diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/getters.js b/contentcuration/contentcuration/frontend/shared/vuex/file/getters.js index c6817290c2..d99e7dde53 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/file/getters.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/getters.js @@ -4,7 +4,7 @@ import FormatPresets from 'shared/leUtils/FormatPresets'; import Languages from 'shared/leUtils/Languages'; export function getFileUpload(state) { - return function(id) { + return function (id) { const fileUpload = state.fileUploadsMap[id]; if (fileUpload) { return { @@ -56,7 +56,7 @@ function parseFileObject(state, file) { } export function getContentNodeFileById(state) { - return function(contentNodeId, fileId) { + return function (contentNodeId, fileId) { const file = (state.contentNodeFilesMap[contentNodeId] || {})[fileId]; if (file) { return parseFileObject(state, file); @@ -65,7 +65,7 @@ export function getContentNodeFileById(state) { } export function getContentNodeFiles(state) { - return function(contentNodeId) { + return function (contentNodeId) { return Object.values(state.contentNodeFilesMap[contentNodeId] || {}) .map(f => parseFileObject(state, f)) .filter(f => f); @@ -75,7 +75,7 @@ export function getContentNodeFiles(state) { export function contentNodesAreUploading(state) { return contentNodeIds => { return flatMap(contentNodeIds, contentNodeId => getContentNodeFiles(state)(contentNodeId)).some( - file => file.uploading + file => file.uploading, ); }; } @@ -83,7 +83,7 @@ export function contentNodesAreUploading(state) { export function contentNodesTotalSize(state) { return contentNodeIds => { return 
flatMap(contentNodeIds, contentNodeId => - getContentNodeFiles(state)(contentNodeId) + getContentNodeFiles(state)(contentNodeId), ).reduce((sum, f) => sum + f.file_size, 0); }; } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/mutations.js b/contentcuration/contentcuration/frontend/shared/vuex/file/mutations.js index e7546cbe6e..7fb81cfe20 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/file/mutations.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/mutations.js @@ -1,18 +1,18 @@ -import Vue from 'vue'; +import Vue, { set } from 'vue'; import { mergeMapItem } from 'shared/vuex/utils'; import { applyMods } from 'shared/data/applyRemoteChanges'; function updateFileMaps(state, file) { if (file.assessment_item) { if (!state.assessmentItemFilesMap[file.assessment_item]) { - Vue.set(state.assessmentItemFilesMap, file.assessment_item, {}); + set(state.assessmentItemFilesMap, file.assessment_item, {}); } - Vue.set(state.assessmentItemFilesMap[file.assessment_item], file.id, file); + set(state.assessmentItemFilesMap[file.assessment_item], file.id, file); } else if (file.contentnode) { if (!state.contentNodeFilesMap[file.contentnode]) { - Vue.set(state.contentNodeFilesMap, file.contentnode, {}); + set(state.contentNodeFilesMap, file.contentnode, {}); } - Vue.set(state.contentNodeFilesMap[file.contentnode], file.id, file); + set(state.contentNodeFilesMap[file.contentnode], file.id, file); } } @@ -34,7 +34,7 @@ export function ADD_FILES(state, files = []) { export function UPDATE_FILE_FROM_INDEXEDDB(state, { id, ...mods }) { if (id && state.fileUploadsMap[id]) { - Vue.set(state.fileUploadsMap, id, { ...applyMods(state.fileUploadsMap[id], mods) }); + set(state.fileUploadsMap, id, { ...applyMods(state.fileUploadsMap[id], mods) }); updateFileMaps(state, state.fileUploadsMap[id]); } } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/utils.js b/contentcuration/contentcuration/frontend/shared/vuex/file/utils.js index 12e59b943c..f7b7c6258d 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/file/utils.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/utils.js @@ -3,17 +3,38 @@ import JSZip from 'jszip'; import { FormatPresetsList, FormatPresetsNames } from 'shared/leUtils/FormatPresets'; import { LicensesList } from 'shared/leUtils/Licenses'; import LanguagesMap from 'shared/leUtils/Languages'; +import { findFirstHtml } from 'shared/utils/zipFile'; const BLOB_SLICE = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice; const CHUNK_SIZE = 2097152; -const MEDIA_PRESETS = [ +const EXTRACTABLE_PRESETS = [ FormatPresetsNames.AUDIO, FormatPresetsNames.HIGH_RES_VIDEO, FormatPresetsNames.LOW_RES_VIDEO, FormatPresetsNames.H5P, + FormatPresetsNames.HTML5_ZIP, ]; -const VIDEO_PRESETS = [FormatPresetsNames.HIGH_RES_VIDEO, FormatPresetsNames.LOW_RES_VIDEO]; -const H5P_PRESETS = [FormatPresetsNames.H5P]; +export const VIDEO_PRESETS = [FormatPresetsNames.HIGH_RES_VIDEO, FormatPresetsNames.LOW_RES_VIDEO]; +const THUMBNAIL_PRESETS = [ + FormatPresetsNames.AUDIO_THUMBNAIL, + FormatPresetsNames.CHANNEL_THUMBNAIL, + FormatPresetsNames.DOCUMENT_THUMBNAIL, + FormatPresetsNames.EXERCISE_IMAGE, + FormatPresetsNames.EXERCISE_THUMBNAIL, + FormatPresetsNames.H5P_THUMBNAIL, + FormatPresetsNames.HTML5_THUMBNAIL, + FormatPresetsNames.QTI_THUMBNAIL, + FormatPresetsNames.SLIDESHOW_IMAGE, + FormatPresetsNames.SLIDESHOW_THUMBNAIL, + FormatPresetsNames.TOPIC_THUMBNAIL, + FormatPresetsNames.VIDEO_THUMBNAIL, 
+ FormatPresetsNames.ZIM_THUMBNAIL, +]; + +export const IMAGE_PRESETS = THUMBNAIL_PRESETS.concat([ + FormatPresetsNames.EXERCISE_IMAGE, + FormatPresetsNames.SLIDESHOW_IMAGE, +]); export function getHash(file) { return new Promise((resolve, reject) => { @@ -21,7 +42,7 @@ export function getHash(file) { const spark = new SparkMD5.ArrayBuffer(); let currentChunk = 0; const chunks = Math.ceil(file.size / CHUNK_SIZE); - fileReader.onload = function(e) { + fileReader.onload = function (e) { spark.append(e.target.result); currentChunk++; @@ -44,7 +65,7 @@ export function getHash(file) { }); } -const extensionPresetMap = FormatPresetsList.reduce((map, value) => { +export const extensionPresetMap = FormatPresetsList.reduce((map, value) => { if (value.display) { value.allowed_formats.forEach(format => { if (!map[format]) { @@ -78,7 +99,7 @@ export async function getH5PMetadata(fileInput) { const metadata = {}; return zip .loadAsync(fileInput) - .then(function(zip) { + .then(function (zip) { const h5pJson = zip.file('h5p.json'); if (h5pJson) { return h5pJson.async('text'); @@ -86,7 +107,7 @@ export async function getH5PMetadata(fileInput) { throw new Error('h5p.json not found in the H5P file.'); } }) - .then(function(h5pContent) { + .then(function (h5pContent) { const data = JSON.parse(h5pContent); if (Object.prototype.hasOwnProperty.call(data, 'title')) { metadata.title = data['title']; @@ -122,6 +143,14 @@ export async function getH5PMetadata(fileInput) { }); } +export function inferPreset(file, presetHint) { + if (presetHint) { + return presetHint; + } + const fileFormat = file.name.split('.').pop().toLowerCase(); + return extensionPresetMap?.[fileFormat]?.[0]; +} + /** * @param {{name: String, preset: String}} file * @param {String|null} preset @@ -129,35 +158,31 @@ export async function getH5PMetadata(fileInput) { */ export function extractMetadata(file, preset = null) { const metadata = { - preset: file.preset || preset, + preset: inferPreset(file, preset), }; - if (!metadata.preset) { - const fileFormat = file.name - .split('.') - .pop() - .toLowerCase(); - // Default to whatever the first preset is - metadata.preset = extensionPresetMap[fileFormat][0]; - } - - // End here if not audio or video - if (!MEDIA_PRESETS.includes(metadata.preset)) { + // End here if we cannot infer further metadata from the file type + if (!EXTRACTABLE_PRESETS.includes(metadata.preset)) { return Promise.resolve(metadata); } - const isH5P = H5P_PRESETS.includes(metadata.preset); - - // Extract additional media metadata - const isVideo = VIDEO_PRESETS.includes(metadata.preset); - return new Promise(resolve => { - if (isH5P) { + if (FormatPresetsNames.H5P === metadata.preset) { getH5PMetadata(file).then(data => { Object.assign(metadata, data); }); resolve(metadata); + } else if (FormatPresetsNames.HTML5_ZIP === metadata.preset) { + findFirstHtml(file).then(htmlFile => { + if (htmlFile) { + metadata.extra_fields = metadata.extra_fields || {}; + metadata.extra_fields.options = metadata.extra_fields.options || {}; + metadata.extra_fields.options.entry = htmlFile; + } + resolve(metadata); + }); } else { + const isVideo = VIDEO_PRESETS.includes(metadata.preset); const mediaElement = document.createElement(isVideo ? 'video' : 'audio'); // Add a listener to read the metadata once it has loaded. 
mediaElement.addEventListener('loadedmetadata', () => { diff --git a/contentcuration/contentcuration/frontend/shared/vuex/file/validation.js b/contentcuration/contentcuration/frontend/shared/vuex/file/validation.js new file mode 100644 index 0000000000..62d771a614 --- /dev/null +++ b/contentcuration/contentcuration/frontend/shared/vuex/file/validation.js @@ -0,0 +1,114 @@ +import { VIDEO_PRESETS, IMAGE_PRESETS, inferPreset } from './utils'; +import { FormatPresetsNames } from 'shared/leUtils/FormatPresets'; +import { findFirstHtml } from 'shared/utils/zipFile'; + +// Validation result codes +export const VALID = 0; +export const INVALID_UNREADABLE_FILE = 1; +export const INVALID_UNSUPPORTED_FORMAT = 2; +export const INVALID_HTML5_ZIP = 3; + +const videoPresetsSet = new Set(VIDEO_PRESETS); +const imagePresetsSet = new Set(IMAGE_PRESETS); + +/** + * Validates an HTML5 zip file by checking for index.html + * @param {File} file - The zip file to validate + * @returns {Promise} - Resolves to validation result code + */ +async function validateHTML5Zip(file) { + try { + const entryPoint = await findFirstHtml(file); + return entryPoint ? VALID : INVALID_HTML5_ZIP; + } catch (e) { + return INVALID_UNREADABLE_FILE; + } +} + +/** + * Validates an image file using an Image object + * @param {string} objectUrl - Object URL for the media file + * @returns {Promise} - Resolves to result code + */ +function validateImage(objectUrl) { + return new Promise(resolve => { + const img = document.createElement('img'); + + img.onload = () => resolve(VALID); + img.onerror = () => resolve(INVALID_UNREADABLE_FILE); + img.src = objectUrl; + }); +} +/** + * Validates an audio file using an Audio object + * @param {string} objectUrl - Object URL for the media file + * @returns {Promise} - Resolves to result code + */ +function validateAudio(objectUrl) { + return new Promise(resolve => { + const audio = document.createElement('audio'); + + audio.onloadedmetadata = () => resolve(VALID); + audio.onerror = () => resolve(INVALID_UNREADABLE_FILE); + audio.src = objectUrl; + }); +} + +/** + * Validates a video file using a Video element + * @param {string} objectUrl - Object URL for the media file + * @returns {Promise} - Resolves to result code + */ +function validateVideo(objectUrl) { + return new Promise(resolve => { + const video = document.createElement('video'); + + video.onloadedmetadata = () => resolve(VALID); + video.onerror = () => resolve(INVALID_UNREADABLE_FILE); + video.src = objectUrl; + }); +} + +/** + * Validates a file is a supported preset and is valid + * @param {File} file - The file to validate + * @returns {Promise} - Resolves to validation result code + */ +export async function validateFile(file) { + // Get the preset definition + const preset = inferPreset(file); + if (!preset) { + return INVALID_UNSUPPORTED_FORMAT; + } + + if (preset === FormatPresetsNames.HTML5_ZIP) { + return await validateHTML5Zip(file); + } + + // Create object URL for validation if needed + if ( + // Audio formats + preset === FormatPresetsNames.AUDIO || + // Video formats + videoPresetsSet.has(preset) || + // Image formats including thumbnails + imagePresetsSet.has(preset) + ) { + const objectUrl = URL.createObjectURL(file); + try { + if (preset === FormatPresetsNames.AUDIO) { + return await validateAudio(objectUrl); + } else if (videoPresetsSet.has(preset)) { + return await validateVideo(objectUrl); + } else { + // All remaining presets are image types + return await validateImage(objectUrl); + } + } finally { + 
URL.revokeObjectURL(objectUrl); + } + } + + // If no validation needed, return valid + return VALID; +} diff --git a/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.js b/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.js index f8afa7445b..087fb167cd 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.js @@ -45,7 +45,7 @@ export class Listener { changeType = Number(changeType); if (!Object.values(CHANGE_TYPES).includes(changeType)) { throw RangeError( - `Change must be ${CHANGE_TYPES.CREATED}, ${CHANGE_TYPES.UPDATED}, or ${CHANGE_TYPES.DELETED}` + `Change must be ${CHANGE_TYPES.CREATED}, ${CHANGE_TYPES.UPDATED}, or ${CHANGE_TYPES.DELETED}`, ); } @@ -64,6 +64,7 @@ export class Listener { const eventName = this.getEventName(); if (!eventName) { + // eslint-disable-next-line no-console console.warn('Cannot register unbound listener: ' + this.callback.toString()); return; } @@ -81,7 +82,7 @@ export class Listener { * @return {Listener} */ export function commitListener(mutationName) { - return new Listener(function(store, obj) { + return new Listener(function (store, obj) { store.commit(this.prefix(mutationName), obj); }); } @@ -93,7 +94,7 @@ export function commitListener(mutationName) { * @return {Listener} */ export function dispatchListener(actionName) { - return new Listener(function(store, obj) { + return new Listener(function (store, obj) { store.dispatch(this.prefix(actionName), obj); }); } @@ -108,8 +109,8 @@ export default function IndexedDBPlugin(db, listeners = []) { const events = new EventEmitter(); events.setMaxListeners(1000); - db.on('changes', function(changes) { - changes.forEach(function(change) { + db.on('changes', function (changes) { + changes.forEach(function (change) { let obj = change.obj || {}; if (change.type === CHANGE_TYPES.UPDATED) { obj = change.mods; @@ -127,7 +128,7 @@ export default function IndexedDBPlugin(db, listeners = []) { }); }); - return function(store) { + return function (store) { listeners.forEach(listener => listener.register(events, store)); }; } diff --git a/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.spec.js b/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.spec.js index c29aea251a..d2b03499df 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.spec.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/indexedDBPlugin/index.spec.js @@ -7,55 +7,58 @@ import IndexedDBPlugin, { } from 'shared/vuex/indexedDBPlugin/index'; import { CHANGE_TYPES } from 'shared/data'; -describe('Listener', function() { +describe('Listener', function () { + let callback; + let listener; + beforeEach(() => { - this.callback = jest.fn(); - this.listener = new Listener(this.callback); + callback = jest.fn(); + listener = new Listener(callback); }); describe('.getEventName()', () => { it('should return null when unbound', () => { - expect(this.listener.getEventName()).toEqual(null); + expect(listener.getEventName()).toEqual(null); }); it("should return event name composed of table's name and change type", () => { - this.listener.tableName = 'testTable'; - this.listener.changeType = CHANGE_TYPES.CREATED; - expect(this.listener.getEventName()).toEqual('testTable/1'); + listener.tableName = 'testTable'; + listener.changeType = CHANGE_TYPES.CREATED; + expect(listener.getEventName()).toEqual('testTable/1'); 
}); }); describe('.prefix(name)', () => { it('should return the name without a namespacePrefix', () => { - expect(this.listener.prefix('someVuexName')).toEqual('someVuexName'); + expect(listener.prefix('someVuexName')).toEqual('someVuexName'); }); it('should prefix with namespacePrefix', () => { - this.listener.namespacePrefix = 'test'; - expect(this.listener.prefix('someVuexName')).toEqual('test/someVuexName'); + listener.namespacePrefix = 'test'; + expect(listener.prefix('someVuexName')).toEqual('test/someVuexName'); }); }); const bindTest = (namespacePrefix = null) => { it('should return a new Listener', () => { - const l = this.listener.bind('testTable', CHANGE_TYPES.CREATED, namespacePrefix); - expect(l).not.toEqual(this.listener); + const l = listener.bind('testTable', CHANGE_TYPES.CREATED, namespacePrefix); + expect(l).not.toEqual(listener); expect(l).toBeInstanceOf(Listener); }); it('should validate the changeType', () => { expect(() => { - this.listener.bind('testTable', -1, namespacePrefix); + listener.bind('testTable', -1, namespacePrefix); }).toThrow(/^Change must be/); }); it('should assign bind args on new instance', () => { - const l = this.listener.bind('testTable', CHANGE_TYPES.CREATED, namespacePrefix); + const l = listener.bind('testTable', CHANGE_TYPES.CREATED, namespacePrefix); - expect(this.listener.callback).toEqual(this.callback); - expect(this.listener.tableName).toEqual(null); - expect(this.listener.changeType).toEqual(null); - expect(this.listener.namespacePrefix).toEqual(null); + expect(listener.callback).toEqual(callback); + expect(listener.tableName).toEqual(null); + expect(listener.changeType).toEqual(null); + expect(listener.namespacePrefix).toEqual(null); - expect(l.callback).toEqual(this.callback); + expect(l.callback).toEqual(callback); expect(l.tableName).toEqual('testTable'); expect(l.changeType).toEqual(CHANGE_TYPES.CREATED); expect(l.namespacePrefix).toEqual(namespacePrefix); @@ -66,101 +69,114 @@ describe('Listener', function() { describe('.bind(tableName, changeType, namespacePrefix)', bindTest.bind({}, 'testNamespace')); }); -describe('commitListener', function() { +describe('commitListener', function () { + let commit; + let store; + let obj; + let listener; + beforeEach(() => { - this.commit = jest.fn(); - this.store = { - commit: this.commit, + commit = jest.fn(); + store = { + commit: commit, }; - this.obj = {}; - this.listener = commitListener('testMutationName'); + obj = {}; + listener = commitListener('testMutationName'); }); it('should return a Listener', () => { - expect(this.listener).toBeInstanceOf(Listener); + expect(listener).toBeInstanceOf(Listener); }); describe('returned Listener.callback', () => { it('should trigger store.commit()', () => { - this.listener.callback(this.store, this.obj); - expect(this.commit).toHaveBeenCalledWith('testMutationName', this.obj); + listener.callback(store, obj); + expect(commit).toHaveBeenCalledWith('testMutationName', obj); }); it('should trigger store.commit() with prefix', () => { - const l = this.listener.bind('testTable', CHANGE_TYPES.CREATED, 'testPrefix'); - l.callback(this.store, this.obj); - expect(this.commit).toHaveBeenCalledWith('testPrefix/testMutationName', this.obj); + const l = listener.bind('testTable', CHANGE_TYPES.CREATED, 'testPrefix'); + l.callback(store, obj); + expect(commit).toHaveBeenCalledWith('testPrefix/testMutationName', obj); }); }); }); -describe('dispatchListener', function() { +describe('dispatchListener', function () { + let dispatch; + let store; + let obj; + let 
listener; + beforeEach(() => { - this.dispatch = jest.fn(); - this.store = { - dispatch: this.dispatch, + dispatch = jest.fn(); + store = { + dispatch: dispatch, }; - this.obj = {}; - this.listener = dispatchListener('testMutationName'); + obj = {}; + listener = dispatchListener('testMutationName'); }); it('should return a Listener', () => { - expect(this.listener).toBeInstanceOf(Listener); + expect(listener).toBeInstanceOf(Listener); }); describe('returned Listener.callback', () => { it('should trigger store.dispatch()', () => { - this.listener.callback(this.store, this.obj); - expect(this.dispatch).toHaveBeenCalledWith('testMutationName', this.obj); + listener.callback(store, obj); + expect(dispatch).toHaveBeenCalledWith('testMutationName', obj); }); it('should trigger store.dispatch() with prefix', () => { - const l = this.listener.bind('testTable', CHANGE_TYPES.CREATED, 'testPrefix'); - l.callback(this.store, this.obj); - expect(this.dispatch).toHaveBeenCalledWith('testPrefix/testMutationName', this.obj); + const l = listener.bind('testTable', CHANGE_TYPES.CREATED, 'testPrefix'); + l.callback(store, obj); + expect(dispatch).toHaveBeenCalledWith('testPrefix/testMutationName', obj); }); }); }); -describe('IndexedDBPlugin', function() { +describe('IndexedDBPlugin', function () { + let source; + let db; + let store; + let changes; + let listeners; beforeEach(() => { - this.source = uuidv4(); - this.db = { + source = uuidv4(); + db = { events: new EventEmitter(), on(...args) { return this.events.on(...args); }, }; - this.dispatch = jest.fn(); - this.store = {}; - this.obj = {}; - this.changes = []; - this.listeners = []; + store = {}; + changes = []; + listeners = []; }); it('should listen for events on `db`', () => { - expect(this.db.events.listenerCount('changes')).toEqual(0); - IndexedDBPlugin(this.db, this.listeners); - expect(this.db.events.listenerCount('changes')).toEqual(1); + expect(db.events.listenerCount('changes')).toEqual(0); + IndexedDBPlugin(db, listeners); + expect(db.events.listenerCount('changes')).toEqual(1); }); it('should return a function that registers listeners', () => { const listener = new Listener(jest.fn()); const register = jest.spyOn(listener, 'register').mockImplementation(() => {}); - const result = IndexedDBPlugin(this.db, [listener]); + const result = IndexedDBPlugin(db, [listener]); expect(result).toBeInstanceOf(Function); - result(this.store); - expect(register).toHaveBeenCalledWith(expect.any(EventEmitter), this.store); + result(store); + expect(register).toHaveBeenCalledWith(expect.any(EventEmitter), store); }); it('should handle change events and trigger listeners', () => { - const testChange = (table, type, source = null, obj = null) => { - this.db[table] = { schema: { primKey: { keyPath: 'testId' } } }; + const testChange = (table, type, _source = null, obj = null) => { + db[table] = { schema: { primKey: { keyPath: 'testId' } } }; const change = { key: uuidv4(), table, type, - source: source || this.source, + source: _source || source, obj: obj || { test: uuidv4(), }, @@ -168,14 +184,14 @@ describe('IndexedDBPlugin', function() { test: uuidv4(), }, }; - this.changes.push(change); + changes.push(change); return change; }; const testListener = (table, type, namespacePrefix = null) => { const callback = jest.fn(); let callObj = null; - this.listeners.push(new Listener(callback).bind(table, type, namespacePrefix)); + listeners.push(new Listener(callback).bind(table, type, namespacePrefix)); return { addChange: (source, obj = null) => { const change = 
testChange.call(this, table, type, source, obj); @@ -185,10 +201,10 @@ describe('IndexedDBPlugin', function() { }; }, assertCalled: () => { - expect(callback).toHaveBeenCalledWith(this.store, callObj); + expect(callback).toHaveBeenCalledWith(store, callObj); }, assertNotCalled: () => { - expect(callback).not.toHaveBeenCalledWith(this.store, callObj); + expect(callback).not.toHaveBeenCalledWith(store, callObj); }, }; }; @@ -207,8 +223,8 @@ describe('IndexedDBPlugin', function() { listener5.addChange(); listener6.addChange(); - const result = IndexedDBPlugin(this.db, this.listeners); - result(this.store); + const result = IndexedDBPlugin(db, listeners); + result(store); listener1.assertNotCalled(); listener2.assertNotCalled(); @@ -217,7 +233,7 @@ describe('IndexedDBPlugin', function() { listener5.assertNotCalled(); listener6.assertNotCalled(); - this.db.events.emit('changes', this.changes); + db.events.emit('changes', changes); listener1.assertCalled(); listener2.assertCalled(); diff --git a/contentcuration/contentcuration/frontend/shared/vuex/persistFactory.js b/contentcuration/contentcuration/frontend/shared/vuex/persistFactory.js index 5d4536af96..5f1725ae06 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/persistFactory.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/persistFactory.js @@ -83,7 +83,7 @@ function prepareMutations(ns, mutations, prefixMutations) { const prefix = prefixMutations ? `${ns}/` : ''; return mutations.reduce( (mutationMap, mutation) => ({ ...mutationMap, [prefix + mutation]: mutation }), - {} + {}, ); } @@ -101,7 +101,7 @@ function prepareMutations(ns, mutations, prefixMutations) { export default function persistFactory(ns, mutations, prefixMutations = true) { const storage = PersistStorage.namespace(ns, prepareMutations(ns, mutations, prefixMutations)); - return function(store) { + return function (store) { store.subscribe(({ type, payload }) => { // Only triggered when the mutation is one we've been told to persist if (storage.shouldPersist(type, payload)) { @@ -125,7 +125,7 @@ export default function persistFactory(ns, mutations, prefixMutations = true) { export function persistAllFactory(ns, mutations, prefixMutations = true) { const storage = PersistStorage.namespace(ns, prepareMutations(ns, mutations, prefixMutations)); - return function(store) { + return function (store) { store.subscribe(({ type, payload }) => { // Only triggered when the mutation is one we've been told to persist if (storage.shouldPersist(type, payload)) { diff --git a/contentcuration/contentcuration/frontend/shared/vuex/policies/index.js b/contentcuration/contentcuration/frontend/shared/vuex/policies/index.js index e09ba0f5b5..ef14f56eda 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/policies/index.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/policies/index.js @@ -1,4 +1,4 @@ -import Vue from 'vue'; +import { set } from 'vue'; import { policyDates, policyKeys, createPolicyKey, policies } from 'shared/constants'; import client from 'shared/client'; @@ -41,12 +41,7 @@ export const getters = { nonAcceptedPolicies(state) { return policyKeys .filter(key => !state.policies[key]) - .map(key => - key - .split('_') - .slice(0, -3) - .join('_') - ); + .map(key => key.split('_').slice(0, -3).join('_')); }, /** * @returns `true` if a policy hasn't been @@ -54,7 +49,7 @@ export const getters = { * Always returns `false` for logged out users. 
*/ isPolicyUnaccepted(state, getters, rootState, rootGetters) { - return function(policy) { + return function (policy) { if (!rootGetters.loggedIn) { return false; } @@ -84,7 +79,7 @@ export const getters = { export const mutations = { SET_POLICIES(state, policies) { for (const policy in policies) { - Vue.set(state.policies, policy, policies[policy]); + set(state.policies, policy, policies[policy]); } }, }; diff --git a/contentcuration/contentcuration/frontend/shared/vuex/policies/index.spec.js b/contentcuration/contentcuration/frontend/shared/vuex/policies/index.spec.js index 7bf7452676..a4110ad375 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/policies/index.spec.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/policies/index.spec.js @@ -59,8 +59,8 @@ describe('policies store', () => { {}, // state { nonAcceptedPolicies: [] }, // getters {}, // rootState - { loggedIn: true } // rootGetters - ) + { loggedIn: true }, // rootGetters + ), ).toBeNull(); }); @@ -71,8 +71,8 @@ describe('policies store', () => { {}, // state { nonAcceptedPolicies: ['privacy_policy', 'terms_of_service'] }, // getters {}, // rootState - { loggedIn: true } // rootGetters - ) + { loggedIn: true }, // rootGetters + ), ).toEqual('terms_of_service'); }); @@ -82,8 +82,8 @@ describe('policies store', () => { {}, // state { nonAcceptedPolicies: ['privacy_policy', 'terms_of_service'] }, // getters {}, // rootState - { loggedIn: false } // rootGetters - ) + { loggedIn: false }, // rootGetters + ), ).toBeNull(); }); }); @@ -103,8 +103,9 @@ describe('policies store', () => { const hour = ('0' + (now.getUTCHours() + 1)).slice(-2); const minute = ('0' + (now.getUTCMinutes() + 1)).slice(-2); - const expectedKey = `${testKey}_${date.getUTCFullYear()}_${date.getUTCMonth() + - 1}_${date.getUTCDate()}`; + const expectedKey = `${testKey}_${date.getUTCFullYear()}_${ + date.getUTCMonth() + 1 + }_${date.getUTCDate()}`; const expectedValue = `${day}/${month}/${year} ${hour}:${minute}`; expect(getters.getPolicyAcceptedData()(policies.PRIVACY)).toEqual({ diff --git a/contentcuration/contentcuration/frontend/shared/vuex/session/index.js b/contentcuration/contentcuration/frontend/shared/vuex/session/index.js index b0a2b10a05..6112870535 100644 --- a/contentcuration/contentcuration/frontend/shared/vuex/session/index.js +++ b/contentcuration/contentcuration/frontend/shared/vuex/session/index.js @@ -91,7 +91,7 @@ export default { * @param {string} flag - shared.constants.FeatureFlagKeys.* * @return {Boolean} */ - return function(flag) { + return function (flag) { return getters.isAdmin || Boolean(getters.featureFlags[flag]); }; }, @@ -138,7 +138,7 @@ export default { updateFullName(context, { first_name, last_name }) { context.commit('UPDATE_SESSION', { first_name, last_name }); }, - fetchUserStorage: debounce(function(context) { + fetchUserStorage: debounce(function (context) { return client.get(window.Urls.user_get_storage_used()).then(({ data }) => { return User.updateDiskSpaceUsed(context.getters.currentUserId, data).then(() => { context.commit('UPDATE_SESSION', { disk_space_used: data }); diff --git a/contentcuration/contentcuration/management/commands/count_public_resources.py b/contentcuration/contentcuration/management/commands/count_public_resources.py index 40b717b608..7c3d825f73 100644 --- a/contentcuration/contentcuration/management/commands/count_public_resources.py +++ b/contentcuration/contentcuration/management/commands/count_public_resources.py @@ -5,16 +5,19 @@ from contentcuration.models import 
Channel from contentcuration.models import ContentNode -logger = logging.getLogger('command') +logger = logging.getLogger("command") class Command(BaseCommand): - def handle(self, *args, **options): - public_tree_ids = Channel.objects.filter(public=True, deleted=False).values_list('main_tree__tree_id', flat=True) - count = ContentNode.objects.filter(tree_id__in=public_tree_ids) \ - .exclude(kind_id='topic') \ - .values('content_id', 'language_id') \ - .distinct() \ - .count() + public_tree_ids = Channel.objects.filter( + public=True, deleted=False + ).values_list("main_tree__tree_id", flat=True) + count = ( + ContentNode.objects.filter(tree_id__in=public_tree_ids) + .exclude(kind_id="topic") + .values("content_id", "language_id") + .distinct() + .count() + ) logger.info("{} unique resources".format(count)) diff --git a/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py b/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py index 96c86e3fa5..81d7809308 100644 --- a/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py +++ b/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py @@ -16,11 +16,16 @@ def handle(self, *args, **options): start = time.time() # Go through nodes that have assessment items with the same assessment_id logging.info("Looking for nodes with invalid assessments...") - nodes = ContentNode.objects.filter(kind_id='exercise') \ + nodes = ( + ContentNode.objects.filter(kind_id="exercise") .annotate( - num_ids=Count('assessment_items__pk'), - num_assessment_ids=Count('assessment_items__assessment_id', distinct=True) - ).exclude(num_ids=F('num_assessment_ids')) + num_ids=Count("assessment_items__pk"), + num_assessment_ids=Count( + "assessment_items__assessment_id", distinct=True + ), + ) + .exclude(num_ids=F("num_assessment_ids")) + ) total = nodes.count() logging.info("Fixing {} nodes...".format(total)) @@ -29,7 +34,9 @@ def handle(self, *args, **options): # Go through each node's assessment items for item in node.assessment_items.all(): # Handle duplicate assessment ids - other_duplicate_assessment_items = node.assessment_items.filter(assessment_id=item.assessment_id).exclude(pk=item.pk) + other_duplicate_assessment_items = node.assessment_items.filter( + assessment_id=item.assessment_id + ).exclude(pk=item.pk) if other_duplicate_assessment_items.exists(): # Remove duplicates @@ -37,14 +44,16 @@ def handle(self, *args, **options): question=item.question, answers=item.answers, hints=item.hints, - raw_data=item.raw_data + raw_data=item.raw_data, ).exists(): item.delete() # Get new ids for non-duplicates else: new_id = uuid.uuid4().hex - while node.assessment_items.filter(assessment_id=new_id).exists(): + while node.assessment_items.filter( + assessment_id=new_id + ).exists(): new_id = uuid.uuid4().hex item.assessment_id = new_id item.save() diff --git a/contentcuration/contentcuration/management/commands/fix_exercise_complete.py b/contentcuration/contentcuration/management/commands/fix_exercise_complete.py index f9ed6e903f..e3be0754c0 100644 --- a/contentcuration/contentcuration/management/commands/fix_exercise_complete.py +++ b/contentcuration/contentcuration/management/commands/fix_exercise_complete.py @@ -9,99 +9,209 @@ from contentcuration.models import ContentNode from contentcuration.models import License -logging = logmodule.getLogger('command') - +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 class Command(BaseCommand): - def 
handle(self, *args, **options): start = time.time() reset_time = time.time() - mastery_model_exercise_count = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='mastery_model')).order_by().count() + mastery_model_exercise_count = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter(Q(extra_fields__has_key="mastery_model")) + .order_by() + .count() + ) i = 0 while i < mastery_model_exercise_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='mastery_model')).order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter(Q(extra_fields__has_key="mastery_model")) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) ContentNode.objects.filter(pk__in=update_ids).update(complete=True) - logging.info('Marked {} nodes as complete=True in {} seconds'.format(CHUNKSIZE, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=True in {} seconds".format( + CHUNKSIZE, time.time() - chunk_time + ) + ) i += CHUNKSIZE - mastery_model_exercise_count = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='option.completion_criteria.mastery_model')).order_by().count() + mastery_model_exercise_count = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter(Q(extra_fields__has_key="option.completion_criteria.mastery_model")) + .order_by() + .count() + ) while i < mastery_model_exercise_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='option.completion_criteria.mastery_model')).order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter( + Q(extra_fields__has_key="option.completion_criteria.mastery_model") + ) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) ContentNode.objects.filter(pk__in=update_ids).update(complete=True) - logging.info('Marked {} nodes as complete=True in {} seconds'.format(CHUNKSIZE, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=True in {} seconds".format( + CHUNKSIZE, time.time() - chunk_time + ) + ) i += CHUNKSIZE - logging.info('Marked all mastery_modeled exercises as complete=True (finished in {})'.format(time.time() - reset_time)) + logging.info( + "Marked all mastery_modeled exercises as complete=True (finished in {})".format( + time.time() - reset_time + ) + ) # Mark invalid titles titlestart = time.time() - logging.info('Marking blank titles...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE, title='').order_by().update(complete=False) - logging.info('Marked {} invalid titles (finished in {})'.format(count, time.time() - titlestart)) + logging.info("Marking blank titles...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE, title="") + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} invalid titles (finished in {})".format( + count, time.time() - titlestart + ) + ) # Mark invalid licenses licensestart = time.time() - logging.info('Marking blank licenses...') - invalid_license_count = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE, license__isnull=True)\ - .order_by().count() + 
logging.info("Marking blank licenses...") + invalid_license_count = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license__isnull=True + ) + .order_by() + .count() + ) while i < invalid_license_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE, license__isnull=True)\ - .order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license__isnull=True + ) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) count = ContentNode.objects.filter(pk__in=update_ids).update(complete=False) - logging.info('Marked {} nodes as complete=False in {} seconds'.format(count, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=False in {} seconds".format( + count, time.time() - chunk_time + ) + ) i += CHUNKSIZE - logging.info('Marked {} invalid licenses (finished in {})'.format(invalid_license_count, time.time() - licensestart)) + logging.info( + "Marked {} invalid licenses (finished in {})".format( + invalid_license_count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank license descriptions...') - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) - count = ContentNode.objects.exclude(complete=False)\ - .filter(kind_id=content_kinds.EXERCISE, license_id__in=custom_licenses).filter(Q(license_description__isnull=True) | Q(license_description=''))\ - .order_by().update(complete=False) - logging.info('Marked {} invalid license descriptions (finished in {})'.format(count, time.time() - licensestart)) + logging.info("Marking blank license descriptions...") + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE, license_id__in=custom_licenses) + .filter(Q(license_description__isnull=True) | Q(license_description="")) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} invalid license descriptions (finished in {})".format( + count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank copyright holders...') - copyright_licenses = list(License.objects.filter(copyright_holder_required=True).values_list("pk", flat=True)) - blank_copyright_holder_count = ContentNode.objects\ - .filter(kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses).filter(Q(copyright_holder__isnull=True) | Q(copyright_holder=''))\ - .order_by().count() + logging.info("Marking blank copyright holders...") + copyright_licenses = list( + License.objects.filter(copyright_holder_required=True).values_list( + "pk", flat=True + ) + ) + blank_copyright_holder_count = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses + ) + .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder="")) + .order_by() + .count() + ) while i < blank_copyright_holder_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses)\ - .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder='')).order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses + ) + .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder="")) + 
.order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) count = ContentNode.objects.filter(pk__in=update_ids).update(complete=False) - logging.info('Marked {} nodes as complete=False in {} seconds'.format(count, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=False in {} seconds".format( + count, time.time() - chunk_time + ) + ) i += CHUNKSIZE - logging.info('Marked {} invalid copyright holders (finished in {})'.format(blank_copyright_holder_count, time.time() - licensestart)) + logging.info( + "Marked {} invalid copyright holders (finished in {})".format( + blank_copyright_holder_count, time.time() - licensestart + ) + ) # Mark invalid exercises exercisestart = time.time() - logging.info('Marking mastery_model less exercises...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE).filter(~Q(extra_fields__has_key='mastery_model'))\ - .order_by().update(complete=False) - logging.info('Marked {} mastery_model less exercises(finished in {})'.format(count, time.time() - exercisestart)) + logging.info("Marking mastery_model less exercises...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE) + .filter(~Q(extra_fields__has_key="mastery_model")) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} mastery_model less exercises(finished in {})".format( + count, time.time() - exercisestart + ) + ) exercisestart = time.time() - logging.info('Marking bad mastery model exercises...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE)\ - .filter(Q(extra_fields__mastery_model=exercises.M_OF_N) & (~Q(extra_fields__has_key='m') | ~Q(extra_fields__has_key='n')))\ - .order_by().update(complete=False) - logging.info('Marked {} bad mastery model exercises (finished in {})'.format(count, time.time() - exercisestart)) - - logging.info('Mark incomplete command completed in {}s'.format(time.time() - start)) + logging.info("Marking bad mastery model exercises...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE) + .filter( + Q(extra_fields__mastery_model=exercises.M_OF_N) + & (~Q(extra_fields__has_key="m") | ~Q(extra_fields__has_key="n")) + ) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} bad mastery model exercises (finished in {})".format( + count, time.time() - exercisestart + ) + ) + + logging.info( + "Mark incomplete command completed in {}s".format(time.time() - start) + ) diff --git a/contentcuration/contentcuration/management/commands/garbage_collect.py b/contentcuration/contentcuration/management/commands/garbage_collect.py index 732a494aec..2255bbfbec 100644 --- a/contentcuration/contentcuration/management/commands/garbage_collect.py +++ b/contentcuration/contentcuration/management/commands/garbage_collect.py @@ -16,11 +16,10 @@ from contentcuration.utils.garbage_collect import clean_up_tasks -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") class Command(BaseCommand): - def handle(self, *args, **options): """ Actual logic for garbage collection. @@ -29,7 +28,9 @@ def handle(self, *args, **options): # Clean up users that are soft deleted and are older than ACCOUNT_DELETION_BUFFER (90 days). # Also clean contentnodes, files and file objects on storage that are associated # with the orphan tree. 
- logging.info("Cleaning up soft deleted users older than ACCOUNT_DELETION_BUFFER (90 days)") + logging.info( + "Cleaning up soft deleted users older than ACCOUNT_DELETION_BUFFER (90 days)" + ) clean_up_soft_deleted_users() logging.info("Cleaning up contentnodes from the orphan tree") diff --git a/contentcuration/contentcuration/management/commands/loadconstants.py b/contentcuration/contentcuration/management/commands/loadconstants.py index 8451359db6..3c54ac2025 100644 --- a/contentcuration/contentcuration/management/commands/loadconstants.py +++ b/contentcuration/contentcuration/management/commands/loadconstants.py @@ -1,6 +1,4 @@ import logging as logmodule -from builtins import object -from builtins import str from django.conf import settings from django.contrib.sites.models import Site diff --git a/contentcuration/contentcuration/management/commands/mark_incomplete.py b/contentcuration/contentcuration/management/commands/mark_incomplete.py index 056634d7d8..3cbb74dcf8 100644 --- a/contentcuration/contentcuration/management/commands/mark_incomplete.py +++ b/contentcuration/contentcuration/management/commands/mark_incomplete.py @@ -13,123 +13,237 @@ from contentcuration.models import File from contentcuration.models import License -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() # Mark invalid titles titlestart = time.time() - logging.info('Marking blank titles...') - count = ContentNode.objects.exclude(complete=False).filter(title='', parent__isnull=False).order_by().update(complete=False) - logging.info('Marked {} invalid titles (finished in {})'.format(count, time.time() - titlestart)) + logging.info("Marking blank titles...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(title="", parent__isnull=False) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} invalid titles (finished in {})".format( + count, time.time() - titlestart + ) + ) # Mark invalid licenses licensestart = time.time() - logging.info('Marking blank licenses...') - count = ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) \ - .exclude(complete=False) \ - .filter(license__isnull=True) \ - .order_by() \ + logging.info("Marking blank licenses...") + count = ( + ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) + .exclude(complete=False) + .filter(license__isnull=True) + .order_by() .update(complete=False) - logging.info('Marked {} invalid licenses (finished in {})'.format(count, time.time() - licensestart)) + ) + logging.info( + "Marked {} invalid licenses (finished in {})".format( + count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank license descriptions...') - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) - count = ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) \ - .exclude(complete=False) \ - .filter(license_id__in=custom_licenses) \ - .filter(Q(license_description__isnull=True) | Q(license_description='')) \ - .order_by() \ + logging.info("Marking blank license descriptions...") + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) + count = ( + ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) + .exclude(complete=False) + .filter(license_id__in=custom_licenses) + .filter(Q(license_description__isnull=True) | Q(license_description="")) + .order_by() .update(complete=False) - 
logging.info('Marked {} invalid license descriptions (finished in {})'.format(count, time.time() - licensestart)) + ) + logging.info( + "Marked {} invalid license descriptions (finished in {})".format( + count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank copyright holders...') - copyright_licenses = list(License.objects.filter(copyright_holder_required=True).values_list("pk", flat=True)) - count = ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) \ - .exclude(complete=False) \ - .filter(license_id__in=copyright_licenses) \ - .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder='')) \ - .order_by() \ + logging.info("Marking blank copyright holders...") + copyright_licenses = list( + License.objects.filter(copyright_holder_required=True).values_list( + "pk", flat=True + ) + ) + count = ( + ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) + .exclude(complete=False) + .filter(license_id__in=copyright_licenses) + .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder="")) + .order_by() .update(complete=False) - logging.info('Marked {} invalid copyright holders (finished in {})'.format(count, time.time() - licensestart)) + ) + logging.info( + "Marked {} invalid copyright holders (finished in {})".format( + count, time.time() - licensestart + ) + ) # Mark invalid file resources resourcestart = time.time() - logging.info('Marking file resources...') - file_check_query = With(File.objects.filter(preset__supplementary=False).values("contentnode_id").order_by(), name="t_file") - - query = file_check_query.join(ContentNode, id=file_check_query.col.contentnode_id, _join_type=LOUTER) \ - .with_cte(file_check_query) \ - .annotate(t_contentnode_id=file_check_query.col.contentnode_id) \ - .exclude(kind_id=content_kinds.TOPIC) \ - .exclude(kind_id=content_kinds.EXERCISE) \ - .exclude(complete=False) \ - .filter(t_contentnode_id__isnull=True) \ + logging.info("Marking file resources...") + file_check_query = With( + File.objects.filter(preset__supplementary=False) + .values("contentnode_id") + .order_by(), + name="t_file", + ) + + query = ( + file_check_query.join( + ContentNode, id=file_check_query.col.contentnode_id, _join_type=LOUTER + ) + .with_cte(file_check_query) + .annotate(t_contentnode_id=file_check_query.col.contentnode_id) + .exclude(kind_id=content_kinds.TOPIC) + .exclude(kind_id=content_kinds.EXERCISE) + .exclude(complete=False) + .filter(t_contentnode_id__isnull=True) .order_by() - count = ContentNode.objects.filter(id__in=query.order_by().values_list('id', flat=True)).update(complete=False) - logging.info('Marked {} invalid file resources (finished in {})'.format(count, time.time() - resourcestart)) + ) + count = ContentNode.objects.filter( + id__in=query.order_by().values_list("id", flat=True) + ).update(complete=False) + logging.info( + "Marked {} invalid file resources (finished in {})".format( + count, time.time() - resourcestart + ) + ) # Mark invalid exercises exercisestart = time.time() - logging.info('Marking exercises...') - - has_questions_query = With(AssessmentItem.objects.all().values("contentnode_id").order_by(), name="t_assessmentitem") - - query = has_questions_query.join(ContentNode, id=has_questions_query.col.contentnode_id, _join_type=LOUTER) \ - .with_cte(has_questions_query) \ - .annotate(t_contentnode_id=has_questions_query.col.contentnode_id) \ - .filter(kind_id=content_kinds.EXERCISE) \ - .exclude(complete=False) \ - .filter(t_contentnode_id__isnull=True) \ + logging.info("Marking 
exercises...") + + has_questions_query = With( + AssessmentItem.objects.all().values("contentnode_id").order_by(), + name="t_assessmentitem", + ) + + query = ( + has_questions_query.join( + ContentNode, + id=has_questions_query.col.contentnode_id, + _join_type=LOUTER, + ) + .with_cte(has_questions_query) + .annotate(t_contentnode_id=has_questions_query.col.contentnode_id) + .filter(kind_id=content_kinds.EXERCISE) + .exclude(complete=False) + .filter(t_contentnode_id__isnull=True) .order_by() + ) exercisestart = time.time() - count = ContentNode.objects.filter(id__in=query.order_by().values_list('id', flat=True)).update(complete=False) + count = ContentNode.objects.filter( + id__in=query.order_by().values_list("id", flat=True) + ).update(complete=False) - logging.info('Marked {} questionless exercises (finished in {})'.format(count, time.time() - exercisestart)) + logging.info( + "Marked {} questionless exercises (finished in {})".format( + count, time.time() - exercisestart + ) + ) exercisestart = time.time() - exercise_check_query = With(AssessmentItem.objects.exclude(type=exercises.PERSEUS_QUESTION) - .filter( - Q(question='') - | Q(answers='[]') - # hack to check if no correct answers - | (~Q(type=exercises.INPUT_QUESTION) & ~Q(answers__iregex=r'"correct":\s*true'))).order_by(), name="t_assessmentitem") - - query = exercise_check_query.join(ContentNode, id=has_questions_query.col.contentnode_id) \ - .with_cte(exercise_check_query) \ - .annotate(t_contentnode_id=exercise_check_query.col.contentnode_id) \ - .filter(kind_id=content_kinds.EXERCISE) \ - .exclude(complete=False) \ + exercise_check_query = With( + AssessmentItem.objects.exclude(type=exercises.PERSEUS_QUESTION) + .filter( + Q(question="") + | Q(answers="[]") + # hack to check if no correct answers + | ( + ~Q(type=exercises.INPUT_QUESTION) + & ~Q(answers__iregex=r'"correct":\s*true') + ) + ) + .order_by(), + name="t_assessmentitem", + ) + + query = ( + exercise_check_query.join( + ContentNode, id=has_questions_query.col.contentnode_id + ) + .with_cte(exercise_check_query) + .annotate(t_contentnode_id=exercise_check_query.col.contentnode_id) + .filter(kind_id=content_kinds.EXERCISE) + .exclude(complete=False) .order_by() + ) - count = ContentNode.objects.filter(id__in=query.order_by().values_list('id', flat=True)).update(complete=False) + count = ContentNode.objects.filter( + id__in=query.order_by().values_list("id", flat=True) + ).update(complete=False) - logging.info('Marked {} invalid exercises (finished in {})'.format(count, time.time() - exercisestart)) + logging.info( + "Marked {} invalid exercises (finished in {})".format( + count, time.time() - exercisestart + ) + ) exercisestart = time.time() - logging.info('Marking mastery_model less exercises...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE).filter(~Q(extra_fields__has_key='mastery_model')) \ - .order_by().update(complete=False) - - logging.info('Marked {} mastery_model less exercises(finished in {})'.format(count, time.time() - exercisestart)) - - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE).filter(~Q(extra_fields__has_key='mastery_model') & ~Q(extra_fields__has_key='option.completion_criteria.mastery_model')) \ - .order_by().update(complete=False) + logging.info("Marking mastery_model less exercises...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE) + .filter(~Q(extra_fields__has_key="mastery_model")) + .order_by() + 
.update(complete=False) + ) + + logging.info( + "Marked {} mastery_model less exercises (finished in {})".format( + count, time.time() - exercisestart + ) + ) + + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE) + .filter( + ~Q(extra_fields__has_key="mastery_model") + & ~Q(extra_fields__has_key="option.completion_criteria.mastery_model") + ) + .order_by() + .update(complete=False) + ) - logging.info('Marked {} mastery_model less exercises(finished in {})'.format(count, time.time() - exercisestart)) + logging.info( + "Marked {} mastery_model less exercises (finished in {})".format( + count, time.time() - exercisestart + ) + ) exercisestart = time.time() - logging.info('Marking bad mastery model exercises...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__mastery_model=exercises.M_OF_N) & (~Q(extra_fields__has_key='m') | ~Q(extra_fields__has_key='n'))) \ - .order_by().update(complete=False) - logging.info('Marked {} bad mastery model exercises (finished in {})'.format(count, time.time() - exercisestart)) - - logging.info('Mark incomplete command completed in {}s'.format(time.time() - start)) + logging.info("Marking bad mastery model exercises...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE) + .filter( + Q(extra_fields__mastery_model=exercises.M_OF_N) + & (~Q(extra_fields__has_key="m") | ~Q(extra_fields__has_key="n")) + ) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} bad mastery model exercises (finished in {})".format( + count, time.time() - exercisestart + ) + ) + + logging.info( + "Mark incomplete command completed in {}s".format(time.time() - start) + ) diff --git a/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py b/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py index 4aa2f9f261..54af9c005b 100644 --- a/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py +++ b/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py @@ -6,7 +6,7 @@ from contentcuration.models import Change from contentcuration.models import User -logger = logging.getLogger('command') +logger = logging.getLogger("command") class Command(BaseCommand): @@ -18,26 +18,42 @@ def handle(self, *args, **options): from contentcuration.tasks import apply_channel_changes_task from contentcuration.tasks import apply_user_changes_task - active_task_ids = [task['id'] for task in app.get_active_and_reserved_tasks()] + active_task_ids = [task["id"] for task in app.get_active_and_reserved_tasks()] - channel_changes = Change.objects.filter(channel_id__isnull=False, applied=False, errored=False) \ - .order_by('channel_id', 'created_by_id') \ - .values('channel_id', 'created_by_id') \ + channel_changes = ( + Change.objects.filter( + channel_id__isnull=False, applied=False, errored=False + ) + .order_by("channel_id", "created_by_id") + .values("channel_id", "created_by_id") .distinct() + ) for channel_change in channel_changes: - apply_channel_changes_task.revoke(exclude_task_ids=active_task_ids, channel_id=channel_change['channel_id']) + apply_channel_changes_task.revoke( + exclude_task_ids=active_task_ids, + channel_id=channel_change["channel_id"], + ) apply_channel_changes_task.fetch_or_enqueue( - User.objects.get(pk=channel_change['created_by_id']), - channel_id=channel_change['channel_id'] +
User.objects.get(pk=channel_change["created_by_id"]), + channel_id=channel_change["channel_id"], ) - user_changes = Change.objects.filter(channel_id__isnull=True, user_id__isnull=False, applied=False, errored=False) \ - .order_by('user_id', 'created_by_id') \ - .values('user_id', 'created_by_id') \ + user_changes = ( + Change.objects.filter( + channel_id__isnull=True, + user_id__isnull=False, + applied=False, + errored=False, + ) + .order_by("user_id", "created_by_id") + .values("user_id", "created_by_id") .distinct() + ) for user_change in user_changes: - apply_user_changes_task.revoke(exclude_task_ids=active_task_ids, user_id=user_change['user_id']) + apply_user_changes_task.revoke( + exclude_task_ids=active_task_ids, user_id=user_change["user_id"] + ) apply_user_changes_task.fetch_or_enqueue( - User.objects.get(pk=user_change['created_by_id']), - user_id=user_change['user_id'] + User.objects.get(pk=user_change["created_by_id"]), + user_id=user_change["user_id"], ) diff --git a/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py b/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py index ce97abf7a5..f5a3474c76 100644 --- a/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py +++ b/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py @@ -20,11 +20,18 @@ def handle(self, *args, **options): from contentcuration.tasks import apply_channel_changes_task # Channels that are in `publishing` state. - publishing_channels = list(Channel.objects.filter(deleted=False, main_tree__publishing=True).values_list("id", flat=True)) + publishing_channels = list( + Channel.objects.filter( + deleted=False, main_tree__publishing=True + ).values_list("id", flat=True) + ) # channel_ids of tasks that are currently being run by the celery workers. - active_channel_tasks = [task["kwargs"].get("channel_id") for task in app.get_active_tasks() - if task["name"] == apply_channel_changes_task.name] + active_channel_tasks = [ + task["kwargs"].get("channel_id") + for task in app.get_active_tasks() + if task["name"] == apply_channel_changes_task.name + ] # If channel is in publishing state and doesnot have any active task, # that means the worker has crashed. So, we reset the publishing state to False. @@ -33,4 +40,6 @@ def handle(self, *args, **options): channel = Channel.objects.get(pk=channel_id) channel.main_tree.publishing = False channel.main_tree.save() - logger.info(f"Resetted publishing status to False for channel {channel.id}.") + logger.info( + f"Resetted publishing status to False for channel {channel.id}." 
+ ) diff --git a/contentcuration/contentcuration/management/commands/restore_channel.py b/contentcuration/contentcuration/management/commands/restore_channel.py index efaeb3ee7c..6133ec3806 100644 --- a/contentcuration/contentcuration/management/commands/restore_channel.py +++ b/contentcuration/contentcuration/management/commands/restore_channel.py @@ -4,26 +4,25 @@ from contentcuration.utils.import_tools import import_channel -logger = logging.getLogger('command') +logger = logging.getLogger("command") class Command(BaseCommand): - def add_arguments(self, parser): # ID of channel to read data from - parser.add_argument('source_id', type=str) + parser.add_argument("source_id", type=str) # ID of channel to write data to (can be same as source channel) - parser.add_argument('--target', help='restore channel db to TARGET CHANNEL ID') - parser.add_argument('--download-url', help='where to download db from') - parser.add_argument('--editor', help='add user as editor to channel') + parser.add_argument("--target", help="restore channel db to TARGET CHANNEL ID") + parser.add_argument("--download-url", help="where to download db from") + parser.add_argument("--editor", help="add user as editor to channel") def handle(self, *args, **options): # Set up variables for restoration process logger.info("\n\n********** STARTING CHANNEL RESTORATION **********") - source_id = options['source_id'] - target_id = options.get('target') or source_id - download_url = options.get('download_url') - editor = options.get('editor') + source_id = options["source_id"] + target_id = options.get("target") or source_id + download_url = options.get("download_url") + editor = options.get("editor") import_channel(source_id, target_id, download_url, editor, logger=logger) diff --git a/contentcuration/contentcuration/management/commands/set_content_mimetypes.py b/contentcuration/contentcuration/management/commands/set_content_mimetypes.py index 732d64f8d6..27af4732fc 100755 --- a/contentcuration/contentcuration/management/commands/set_content_mimetypes.py +++ b/contentcuration/contentcuration/management/commands/set_content_mimetypes.py @@ -18,18 +18,17 @@ class Command(BaseCommand): - def handle(self, *args, **kwargs): blobs = self._list_all_files() futures = [] with concurrent.futures.ThreadPoolExecutor() as e: - print("Scheduling all metadata update jobs...") + print("Scheduling all metadata update jobs...") # noqa: T201 for blob in blobs: future = e.submit(self._update_metadata, blob) futures.append(future) - print("Waiting for all jobs to finish...") + print("Waiting for all jobs to finish...") # noqa: T201 def _determine_cache_control(self, name): _, ext = os.path.splitext(name) diff --git a/contentcuration/contentcuration/management/commands/set_default_learning_activities.py b/contentcuration/contentcuration/management/commands/set_default_learning_activities.py index b6202477fe..e1105b70e5 100644 --- a/contentcuration/contentcuration/management/commands/set_default_learning_activities.py +++ b/contentcuration/contentcuration/management/commands/set_default_learning_activities.py @@ -6,31 +6,46 @@ from contentcuration.constants.contentnode import kind_activity_map from contentcuration.models import ContentNode -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() for kind, activity in kind_activity_map.items(): kind_start = time.time() - map_to_set = { - activity: True - } + map_to_set = 
{activity: True} - null_learning_activities = ContentNode.objects.filter(kind=kind, learning_activities__isnull=True).values_list("id", flat=True) + null_learning_activities = ContentNode.objects.filter( + kind=kind, learning_activities__isnull=True + ).values_list("id", flat=True) - logging.info("Setting default learning activities for kind: {}".format(kind)) + logging.info( + "Setting default learning activities for kind: {}".format(kind) + ) while null_learning_activities.exists(): - updated_count = ContentNode.objects.filter(id__in=null_learning_activities[0:CHUNKSIZE]).update(learning_activities=map_to_set) - logging.info("Updated {} content nodes of kind {} with learning activity {}".format(updated_count, kind, activity)) - - logging.info("Finished setting default learning activities for kind: {} in {} seconds".format(kind, time.time() - kind_start)) - - logging.info('Finished setting all null learning activities in {} seconds'.format(time.time() - start)) + updated_count = ContentNode.objects.filter( + id__in=null_learning_activities[0:CHUNKSIZE] + ).update(learning_activities=map_to_set) + logging.info( + "Updated {} content nodes of kind {} with learning activity {}".format( + updated_count, kind, activity + ) + ) + + logging.info( + "Finished setting default learning activities for kind: {} in {} seconds".format( + kind, time.time() - kind_start + ) + ) + + logging.info( + "Finished setting all null learning activities in {} seconds".format( + time.time() - start + ) + ) diff --git a/contentcuration/contentcuration/management/commands/set_file_duration.py b/contentcuration/contentcuration/management/commands/set_file_duration.py index 77446c9853..958b05dba3 100644 --- a/contentcuration/contentcuration/management/commands/set_file_duration.py +++ b/contentcuration/contentcuration/management/commands/set_file_duration.py @@ -7,7 +7,7 @@ from contentcuration.models import File from contentcuration.models import MEDIA_PRESETS -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 @@ -31,7 +31,7 @@ def extract_duration_of_media(f_in, extension): # noqa C901 "panic", "-f", extension, - "-" + "-", ], stdin=f_in, ) @@ -52,7 +52,7 @@ def extract_duration_of_media(f_in, extension): # noqa C901 "-", ], stdin=f_in, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) try: second_last_line = result.stderr.decode("utf-8").strip().splitlines()[-2] @@ -76,22 +76,25 @@ def extract_duration_of_media(f_in, extension): # noqa C901 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() - logging.info("Setting default duration for media presets: {}".format(MEDIA_PRESETS)) + logging.info( + "Setting default duration for media presets: {}".format(MEDIA_PRESETS) + ) excluded_files = set() - null_duration = File.objects.filter(preset_id__in=MEDIA_PRESETS, duration__isnull=True) + null_duration = File.objects.filter( + preset_id__in=MEDIA_PRESETS, duration__isnull=True + ) null_duration_count = null_duration.count() updated_count = 0 i = 0 while i < null_duration_count: - for file in null_duration[i:i + CHUNKSIZE]: + for file in null_duration[i : i + CHUNKSIZE]: if file.file_on_disk.name in excluded_files: continue file.refresh_from_db() @@ -99,16 +102,26 @@ def handle(self, *args, **options): continue try: with file.file_on_disk.open() as f: - duration = extract_duration_of_media(f, file.file_format.extension) + duration = extract_duration_of_media( + f, file.file_format.extension + ) if duration: - updated_count += 
File.objects.filter(checksum=file.checksum, preset_id__in=MEDIA_PRESETS).update(duration=duration) + updated_count += File.objects.filter( + checksum=file.checksum, preset_id__in=MEDIA_PRESETS + ).update(duration=duration) except FileNotFoundError: logging.warning("File {} not found".format(file)) excluded_files.add(file.file_on_disk.name) except (subprocess.CalledProcessError, RuntimeError): - logging.warning("File {} could not be read for duration".format(file)) + logging.warning( + "File {} could not be read for duration".format(file) + ) excluded_files.add(file.file_on_disk.name) i += CHUNKSIZE - logging.info('Finished setting all null duration for {} files in {} seconds'.format(updated_count, time.time() - start)) + logging.info( + "Finished setting all null duration for {} files in {} seconds".format( + updated_count, time.time() - start + ) + ) diff --git a/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py b/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py index 38865f6b89..32d2659173 100644 --- a/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py +++ b/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py @@ -11,38 +11,57 @@ from contentcuration.models import ContentNode from contentcuration.models import File -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() logging.info("Setting 'has captions' for audio kinds") - has_captions_subquery = Exists(File.objects.filter(contentnode=OuterRef("id"), language=OuterRef("language"), preset_id=format_presets.VIDEO_SUBTITLE)) + has_captions_subquery = Exists( + File.objects.filter( + contentnode=OuterRef("id"), + language=OuterRef("language"), + preset_id=format_presets.VIDEO_SUBTITLE, + ) + ) # Only try to update audio nodes which have not had any accessibility labels set on them # this will allow this management command to be rerun and resume from where it left off # and also prevent stomping previous edits to the accessibility_labels field. 
- updateable_nodes = ContentNode.objects.filter(has_captions_subquery, kind=content_kinds.AUDIO, accessibility_labels__isnull=True) + updateable_nodes = ContentNode.objects.filter( + has_captions_subquery, + kind=content_kinds.AUDIO, + accessibility_labels__isnull=True, + ) - updateable_node_slice = updateable_nodes.values_list("id", flat=True)[0:CHUNKSIZE] + updateable_node_slice = updateable_nodes.values_list("id", flat=True)[ + 0:CHUNKSIZE + ] count = 0 while updateable_nodes.exists(): this_count = ContentNode.objects.filter( id__in=updateable_node_slice - ).update(accessibility_labels={accessibility_categories.CAPTIONS_SUBTITLES: True}) + ).update( + accessibility_labels={accessibility_categories.CAPTIONS_SUBTITLES: True} + ) logging.info("Set has captions metadata for {} nodes".format(this_count)) count += this_count - updateable_node_slice = updateable_nodes.values_list("id", flat=True)[0:CHUNKSIZE] + updateable_node_slice = updateable_nodes.values_list("id", flat=True)[ + 0:CHUNKSIZE + ] - logging.info('Finished setting all has captions metadata for {} nodes in {} seconds'.format(count, time.time() - start)) + logging.info( + "Finished setting all has captions metadata for {} nodes in {} seconds".format( + count, time.time() - start + ) + ) diff --git a/contentcuration/contentcuration/management/commands/set_storage_used.py b/contentcuration/contentcuration/management/commands/set_storage_used.py index 906ac580e7..52a185ab81 100644 --- a/contentcuration/contentcuration/management/commands/set_storage_used.py +++ b/contentcuration/contentcuration/management/commands/set_storage_used.py @@ -13,7 +13,11 @@ def add_arguments(self, parser): parser.add_argument("--force", action="store_true", dest="force", default=False) def handle(self, *args, **options): - users = User.objects.all() if options["force"] else User.objects.filter(disk_space_used=0) + users = ( + User.objects.all() + if options["force"] + else User.objects.filter(disk_space_used=0) + ) for index, user in enumerate(users): user.set_space_used() logger.info("Updated storage used for {} user(s)".format(index + 1)) diff --git a/contentcuration/contentcuration/management/commands/setup.py b/contentcuration/contentcuration/management/commands/setup.py index 305f9318ff..16478297f0 100644 --- a/contentcuration/contentcuration/management/commands/setup.py +++ b/contentcuration/contentcuration/management/commands/setup.py @@ -40,30 +40,35 @@ class Command(BaseCommand): - def add_arguments(self, parser): - parser.add_argument('--email', dest="email", default="a@a.com") - parser.add_argument('--password', dest="password", default="a") + parser.add_argument("--email", dest="email", default="a@a.com") + parser.add_argument("--password", dest="password", default="a") + parser.add_argument( + "--clean-data-state", + action="store_true", + default=False, + help="Sets database in clean state.", + ) def handle(self, *args, **options): # Validate email email = options["email"] password = options["password"] if not re.match(r"[^@]+@[^@]+\.[^@]+", email): - print("{} is not a valid email".format(email)) + print("{} is not a valid email".format(email)) # noqa: T201 sys.exit() # create the cache table try: call_command("createcachetable") except DBError as e: - logging.error('Error creating cache table: {}'.format(str(e))) + logging.error("Error creating cache table: {}".format(str(e))) # Run migrations - call_command('migrate') + call_command("migrate") # Run loadconstants - call_command('loadconstants') + call_command("loadconstants") # Set up 
user as admin admin = create_user(email, password, "Admin", "User", admin=True) @@ -73,69 +78,124 @@ def handle(self, *args, **options): user2 = create_user("user@b.com", "b", "User", "B") user3 = create_user("user@c.com", "c", "User", "C") - # Create channels - - channel1 = create_channel("Published Channel", DESCRIPTION, editors=[admin], bookmarkers=[user1, user2], public=True) - channel2 = create_channel("Ricecooker Channel", DESCRIPTION, editors=[admin, user1], bookmarkers=[user2], viewers=[user3]) - channel3 = create_channel("Empty Channel", editors=[user3], viewers=[user2]) - channel4 = create_channel("Imported Channel", editors=[admin]) - - # Invite admin to channel 3 - try: - invitation, _new = Invitation.objects.get_or_create( - invited=admin, - sender=user3, - channel=channel3, - email=admin.email, + # Only create additional data when clean-data-state is False (i.e. default behaviour). + if options["clean_data_state"] is False: + # Create channels + channel1 = create_channel( + "Published Channel", + DESCRIPTION, + editors=[admin], + bookmarkers=[user1, user2], + public=True, ) - invitation.share_mode = "edit" - invitation.save() - except MultipleObjectsReturned: - # we don't care, just continue - pass + channel2 = create_channel( + "Ricecooker Channel", + DESCRIPTION, + editors=[admin, user1], + bookmarkers=[user2], + viewers=[user3], + ) + channel3 = create_channel("Empty Channel", editors=[user3], viewers=[user2]) + channel4 = create_channel("Imported Channel", editors=[admin]) - # Create pool of tags - tags = [] - for t in TAGS: - tag, _new = ContentTag.objects.get_or_create(tag_name=t, channel=channel1) + # Invite admin to channel 3 + try: + invitation, _new = Invitation.objects.get_or_create( + invited=admin, + sender=user3, + channel=channel3, + email=admin.email, + ) + invitation.share_mode = "edit" + invitation.save() + except MultipleObjectsReturned: + # we don't care, just continue + pass - # Generate file objects - document_file = create_file("Sample Document", format_presets.DOCUMENT, file_formats.PDF, user=admin) - video_file = create_file("Sample Video", format_presets.VIDEO_HIGH_RES, file_formats.MP4, user=admin) - subtitle_file = create_file("Sample Subtitle", format_presets.VIDEO_SUBTITLE, file_formats.VTT, user=admin) - audio_file = create_file("Sample Audio", format_presets.AUDIO, file_formats.MP3, user=admin) - html5_file = create_file("Sample HTML", format_presets.HTML5_ZIP, file_formats.HTML5, user=admin) + # Create pool of tags + tags = [] + for t in TAGS: + tag, _new = ContentTag.objects.get_or_create( + tag_name=t, channel=channel1 + ) - # Populate channel 1 with content - generate_tree(channel1.main_tree, document_file, video_file, subtitle_file, audio_file, html5_file, user=admin, tags=tags) + # Generate file objects + document_file = create_file( + "Sample Document", format_presets.DOCUMENT, file_formats.PDF, user=admin + ) + video_file = create_file( + "Sample Video", + format_presets.VIDEO_HIGH_RES, + file_formats.MP4, + user=admin, + ) + subtitle_file = create_file( + "Sample Subtitle", + format_presets.VIDEO_SUBTITLE, + file_formats.VTT, + user=admin, + ) + audio_file = create_file( + "Sample Audio", format_presets.AUDIO, file_formats.MP3, user=admin + ) + html5_file = create_file( + "Sample HTML", format_presets.HTML5_ZIP, file_formats.HTML5, user=admin + ) - # Populate channel 2 with staged content - channel2.ricecooker_version = "0.0.0" - channel2.save() - generate_tree(channel2.staging_tree, document_file, video_file, subtitle_file, 
audio_file, html5_file, user=admin, tags=tags) + # Populate channel 1 with content + generate_tree( + channel1.main_tree, + document_file, + video_file, + subtitle_file, + audio_file, + html5_file, + user=admin, + tags=tags, + ) - # Import content from channel 1 into channel 4 - channel1.main_tree.children.first().copy_to(channel4.main_tree) + # Populate channel 2 with staged content + channel2.ricecooker_version = "0.0.0" + channel2.save() + generate_tree( + channel2.staging_tree, + document_file, + video_file, + subtitle_file, + audio_file, + html5_file, + user=admin, + tags=tags, + ) - # Get validation to be reflected in nodes properly - ContentNode.objects.all().update(complete=True) - call_command('mark_incomplete') + # Import content from channel 1 into channel 4 + channel1.main_tree.children.first().copy_to(channel4.main_tree) - # Mark this node as incomplete even though it is complete - # for testing purposes - node = ContentNode.objects.get(tree_id=channel1.main_tree.tree_id, title="Sample Audio") - node.complete = False - node.save() + # Get validation to be reflected in nodes properly + ContentNode.objects.all().update(complete=True) + call_command("mark_incomplete") - # Publish - publish_channel(admin.id, channel1.pk) + # Mark this node as incomplete even though it is complete + # for testing purposes + node = ContentNode.objects.get( + tree_id=channel1.main_tree.tree_id, title="Sample Audio" + ) + node.complete = False + node.save() + + # Publish + publish_channel(admin.id, channel1.pk) - # Add nodes to clipboard in legacy way - legacy_clipboard_nodes = channel1.main_tree.get_children() - for legacy_node in legacy_clipboard_nodes: - legacy_node.copy_to(target=user1.clipboard_tree) + # Add nodes to clipboard in legacy way + legacy_clipboard_nodes = channel1.main_tree.get_children() + for legacy_node in legacy_clipboard_nodes: + legacy_node.copy_to(target=user1.clipboard_tree) - print("\n\n\nSETUP DONE: Log in as admin to view data (email: {}, password: {})\n\n\n".format(email, password)) + print( # noqa: T201 + "\n\n\nSETUP DONE: Log in as admin to view data (email: {}, password: {})\n\n\n".format( + email, password + ) + ) def generate_tree(root, document, video, subtitle, audio, html5, user=None, tags=None): @@ -146,18 +206,60 @@ def generate_tree(root, document, video, subtitle, audio, html5, user=None, tags # Add files to topic 1 license_id = License.objects.get(license_name=LICENSE).pk - topic1_video_node = create_contentnode("Sample Video", topic1, video, content_kinds.VIDEO, license_id, user=user, tags=tags) + topic1_video_node = create_contentnode( + "Sample Video", + topic1, + video, + content_kinds.VIDEO, + license_id, + user=user, + tags=tags, + ) duplicate_file(subtitle, node=topic1_video_node) - topic1_document_node = create_contentnode("Sample Document", topic1, document, content_kinds.DOCUMENT, license_id, user=user, tags=tags) - topic1_audio_node = create_contentnode("Sample Audio", topic1, audio, content_kinds.AUDIO, license_id, user=user, tags=tags) - topic1_html5_node = create_contentnode("Sample HTML", topic1, html5, content_kinds.HTML5, license_id, user=user, tags=tags) - topic1_exercise_node = create_exercise("Sample Exercise", topic1, license_id, user=user) + topic1_document_node = create_contentnode( + "Sample Document", + topic1, + document, + content_kinds.DOCUMENT, + license_id, + user=user, + tags=tags, + ) + topic1_audio_node = create_contentnode( + "Sample Audio", + topic1, + audio, + content_kinds.AUDIO, + license_id, + user=user, + tags=tags, + ) 
+ topic1_html5_node = create_contentnode( + "Sample HTML", + topic1, + html5, + content_kinds.HTML5, + license_id, + user=user, + tags=tags, + ) + topic1_exercise_node = create_exercise( + "Sample Exercise", topic1, license_id, user=user + ) create_exercise("Sample Empty Exercise", topic1, license_id, user=user, empty=True) # Setup pre/post-requisites around Exercise node # Topic 1 Video -> Topic 1 Document -> Topic 1 Exercise -> Topic 1 Audio -> Topic 1 Html5 - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_document_node.id, prerequisite_id=topic1_video_node.id) - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_exercise_node.id, prerequisite_id=topic1_document_node.id) - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_audio_node.id, prerequisite_id=topic1_exercise_node.id) - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_html5_node.id, prerequisite_id=topic1_audio_node.id) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_document_node.id, prerequisite_id=topic1_video_node.id + ) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_exercise_node.id, prerequisite_id=topic1_document_node.id + ) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_audio_node.id, prerequisite_id=topic1_exercise_node.id + ) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_html5_node.id, prerequisite_id=topic1_audio_node.id + ) diff --git a/contentcuration/contentcuration/management/commands/setup_perftest_data.py b/contentcuration/contentcuration/management/commands/setup_perftest_data.py index da67679cd4..18fbadae54 100644 --- a/contentcuration/contentcuration/management/commands/setup_perftest_data.py +++ b/contentcuration/contentcuration/management/commands/setup_perftest_data.py @@ -3,8 +3,8 @@ from contentcuration.models import ContentNode from contentcuration.utils.db_tools import create_channel -from contentcuration.utils.db_tools import TreeBuilder from contentcuration.utils.db_tools import create_user +from contentcuration.utils.db_tools import TreeBuilder LICENSE = licenses.SPECIAL_PERMISSIONS @@ -20,7 +20,7 @@ def handle(self, *args, **options): self.editor.clipboard_tree.get_descendants().delete() with ContentNode.objects.delay_mptt_updates(): - print("Creating channel...") + print("Creating channel...") # noqa: T201 self.generate_random_channels() # Make sure we have a channel with a lot of root topics to test initial channel load. @@ -33,7 +33,7 @@ def handle(self, *args, **options): self.editor.clipboard_tree = TreeBuilder( levels=2, num_children=25, user=self.editor ).root - print( + print( # noqa: T201 "Created clipboard with {} nodes".format( self.editor.clipboard_tree.get_descendants().count() ) @@ -47,7 +47,7 @@ def generate_random_channels(self, num_channels=1): new_channel.main_tree = TreeBuilder(user=self.editor).root - print( + print( # noqa: T201 "Created channel with {} nodes".format( new_channel.main_tree.get_descendants().count() ) @@ -55,4 +55,4 @@ def generate_random_channels(self, num_channels=1): # make sure we have a trash tree so that can be tested with real data as well. 
         new_channel.trash_tree = TreeBuilder(user=self.editor).root
-        print("Created channel with id {}".format(new_channel.pk))
+        print("Created channel with id {}".format(new_channel.pk))  # noqa: T201
diff --git a/contentcuration/contentcuration/management/commands/test_server_perf.py b/contentcuration/contentcuration/management/commands/test_server_perf.py
index 8123e9ec66..b17f0d8081 100644
--- a/contentcuration/contentcuration/management/commands/test_server_perf.py
+++ b/contentcuration/contentcuration/management/commands/test_server_perf.py
@@ -5,15 +5,15 @@

 class Command(BaseCommand):
-    help = 'Runs db tests and reports the performance results. (Usage: test_server_perf [num_objects=100])'
+    help = "Runs db tests and reports the performance results. (Usage: test_server_perf [num_objects=100])"

     def add_arguments(self, parser):
         pass

         # ID of channel to read data from
-        parser.add_argument('--num_objects', type=int, default=100)
+        parser.add_argument("--num_objects", type=int, default=100)

         # ID of channel to write data to (can be same as source channel)
-        parser.add_argument('--stress-test', action='store_true', default=False)
+        parser.add_argument("--stress-test", action="store_true", default=False)

     def handle(self, *args, **options):
         objects = None
@@ -21,23 +21,34 @@ def handle(self, *args, **options):
         objects = objective.Objective()

         stats = {}
-        num_objects = options['num_objects']
+        num_objects = options["num_objects"]
         num_runs = 10

-        object_types = ['ContentNode', 'File']
+        object_types = ["ContentNode", "File"]
         for object_type in object_types:
-            stats[object_type] = objects.get_object_creation_stats(object_type, num_objects, num_runs)
+            stats[object_type] = objects.get_object_creation_stats(
+                object_type, num_objects, num_runs
+            )

-        stats['ContentNode-mptt-delay'] = objects.get_object_creation_stats_mptt_delay(num_objects, num_runs)
-        object_types.append('ContentNode-mptt-delay')
+        stats[
+            "ContentNode-mptt-delay"
+        ] = objects.get_object_creation_stats_mptt_delay(num_objects, num_runs)
+        object_types.append("ContentNode-mptt-delay")

         print()
         print("Test results:")
         for object_type in object_types:
             run_stats = stats[object_type]
-            print("Stats for creating {} {} objects over {} runs: {}".format(num_objects, object_type, num_runs, run_stats))
-
-        if options['stress_test']:
-            print("Running stress test simulating creation / cloning of a channel like KA, this will take at least several minutes. Please do not interrupt if possible!")
+            print(  # noqa: T201
+                "Stats for creating {} {} objects over {} runs: {}".format(
+                    num_objects, object_type, num_runs, run_stats
+                )
+            )
+
+        if options["stress_test"]:
+            print(  # noqa: T201
+                "Running stress test simulating creation / cloning of a channel like KA, "
+                "this will take at least several minutes. Please do not interrupt if possible!"
+ ) stats = objects.get_large_channel_creation_stats() for stat in stats: print("{}: {}".format(stat, stats[stat])) diff --git a/contentcuration/contentcuration/middleware/db_readonly.py b/contentcuration/contentcuration/middleware/db_readonly.py index 34c3c077e0..958da79751 100644 --- a/contentcuration/contentcuration/middleware/db_readonly.py +++ b/contentcuration/contentcuration/middleware/db_readonly.py @@ -9,16 +9,18 @@ class MiddlewareMixin(object): pass + from readonly.exceptions import DatabaseWriteDenied class DatabaseReadOnlyMiddleware(MiddlewareMixin): - def process_exception(self, request, exception): # Only process DatabaseWriteDenied exceptions if not isinstance(exception, DatabaseWriteDenied): return None # Handle the exception - if request.method != 'GET': - return HttpResponseBadRequest(_('The site is currently in read-only mode. Please try again later.')) + if request.method != "GET": + return HttpResponseBadRequest( + _("The site is currently in read-only mode. Please try again later.") + ) diff --git a/contentcuration/contentcuration/middleware/error_reporting.py b/contentcuration/contentcuration/middleware/error_reporting.py index c6b5a099c1..cfea9e797c 100644 --- a/contentcuration/contentcuration/middleware/error_reporting.py +++ b/contentcuration/contentcuration/middleware/error_reporting.py @@ -4,12 +4,11 @@ class ErrorReportingMiddleware(object): - def __init__(self, *args, **kwargs): self.client = error_reporting.Client.from_service_account_json( os.getenv("GOOGLE_APPLICATION_CREDENTIALS"), service=os.getenv("GCLOUD_DEBUGGER_APP_IDENTIFIER"), - _use_grpc=False + _use_grpc=False, ) def process_exception(self, request, exception): diff --git a/contentcuration/contentcuration/middleware/locale.py b/contentcuration/contentcuration/middleware/locale.py index 965312c0fa..edb62b3281 100644 --- a/contentcuration/contentcuration/middleware/locale.py +++ b/contentcuration/contentcuration/middleware/locale.py @@ -24,4 +24,6 @@ def process_view(self, request, callback, callback_args, callback_kwargs): def process_response(self, request, response): if self._is_exempt(request): return response - return super(KolibriStudioLocaleMiddleware, self).process_response(request, response) + return super(KolibriStudioLocaleMiddleware, self).process_response( + request, response + ) diff --git a/contentcuration/contentcuration/middleware/session.py b/contentcuration/contentcuration/middleware/session.py index 35fb81a367..c110650fe2 100644 --- a/contentcuration/contentcuration/middleware/session.py +++ b/contentcuration/contentcuration/middleware/session.py @@ -20,4 +20,6 @@ def process_view(self, request, callback, callback_args, callback_kwargs): def process_response(self, request, response): if self._is_exempt(request): return response - return super(KolibriStudioSessionMiddleware, self).process_response(request, response) + return super(KolibriStudioSessionMiddleware, self).process_response( + request, response + ) diff --git a/contentcuration/contentcuration/migration_production_settings.py b/contentcuration/contentcuration/migration_production_settings.py index e4b948c5c7..610a428525 100644 --- a/contentcuration/contentcuration/migration_production_settings.py +++ b/contentcuration/contentcuration/migration_production_settings.py @@ -1,8 +1,8 @@ # Settings used by migrations. 
This removes the need for Redis during migration jobs - +# flake8: noqa: F403, F405 from .production_settings import * -CACHES['default']['BACKEND'] = "django_prometheus.cache.backends.locmem.LocMemCache" +CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.locmem.LocMemCache" # Remove the need for GCS as well -DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage' +DEFAULT_FILE_STORAGE = "django_s3_storage.storage.S3Storage" diff --git a/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py b/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py index f09d96d66c..8336457756 100644 --- a/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py +++ b/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.9.13 on 2018-10-02 17:57 -from __future__ import unicode_literals - import uuid import django.contrib.postgres.fields.jsonb @@ -20,851 +18,1686 @@ class Migration(migrations.Migration): initial = True dependencies = [ - ('auth', '0007_alter_validators_add_error_messages'), + ("auth", "0007_alter_validators_add_error_messages"), ] operations = [ migrations.CreateModel( - name='User', + name="User", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('password', models.CharField(max_length=128, verbose_name='password')), - ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), - ('email', models.EmailField(max_length=100, unique=True)), - ('first_name', models.CharField(max_length=100)), - ('last_name', models.CharField(max_length=100)), - ('is_admin', models.BooleanField(default=False)), - ('is_active', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("password", models.CharField(max_length=128, verbose_name="password")), + ( + "last_login", + models.DateTimeField( + blank=True, null=True, verbose_name="last login" + ), + ), + ("email", models.EmailField(max_length=100, unique=True)), + ("first_name", models.CharField(max_length=100)), + ("last_name", models.CharField(max_length=100)), + ("is_admin", models.BooleanField(default=False)), + ("is_active", models.BooleanField(default=False)), ], options={ - 'verbose_name': 'User', - 'verbose_name_plural': 'Users', + "verbose_name": "User", + "verbose_name_plural": "Users", }, ), migrations.CreateModel( - name='AssessmentItem', + name="AssessmentItem", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('type', models.CharField(default='multiplechoice', max_length=50)), - ('question', models.TextField(blank=True)), - ('answers', models.TextField(default='[]')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("type", models.CharField(default="multiplechoice", max_length=50)), + ("question", models.TextField(blank=True)), + ("answers", models.TextField(default="[]")), ], ), migrations.CreateModel( - name='Channel', + name="Channel", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('name', models.CharField(max_length=200)), - ('description', models.CharField(blank=True, max_length=400)), - ('version', models.IntegerField(default=0)), - 
('thumbnail', models.TextField(blank=True)), - ('deleted', models.BooleanField(default=False)), - ('public', models.BooleanField(default=False)), - ('bookmarked_by', models.ManyToManyField(related_name='bookmarked_channels', to=settings.AUTH_USER_MODEL, verbose_name='bookmarked by')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("name", models.CharField(max_length=200)), + ("description", models.CharField(blank=True, max_length=400)), + ("version", models.IntegerField(default=0)), + ("thumbnail", models.TextField(blank=True)), + ("deleted", models.BooleanField(default=False)), + ("public", models.BooleanField(default=False)), + ( + "bookmarked_by", + models.ManyToManyField( + related_name="bookmarked_channels", + to=settings.AUTH_USER_MODEL, + verbose_name="bookmarked by", + ), + ), ], options={ - 'verbose_name': 'Channel', - 'verbose_name_plural': 'Channels', + "verbose_name": "Channel", + "verbose_name_plural": "Channels", }, ), migrations.CreateModel( - name='ContentKind', + name="ContentKind", fields=[ - ('kind', models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', - 'Exercise'), ('document', 'Document'), ('image', 'Image')], max_length=200, primary_key=True, serialize=False)), + ( + "kind", + models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("image", "Image"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), + ), ], ), migrations.CreateModel( - name='ContentNode', + name="ContentNode", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('content_id', contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32)), - ('title', models.CharField(max_length=200)), - ('description', models.CharField(blank=True, max_length=400)), - ('sort_order', models.FloatField(default=0, help_text='Ascending, lowest number shown first', max_length=50, verbose_name='sort order')), - ('license_owner', models.CharField(blank=True, help_text='Organization of person who holds the essential rights', max_length=200)), - ('author', models.CharField(blank=True, help_text='Person who created content', max_length=200)), - ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), - ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), - ('changed', models.BooleanField(default=True)), - ('lft', models.PositiveIntegerField(db_index=True, editable=False)), - ('rght', models.PositiveIntegerField(db_index=True, editable=False)), - ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)), - ('level', models.PositiveIntegerField(db_index=True, editable=False)), - ('cloned_source', mptt.fields.TreeForeignKey(blank=True, null=True, - on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='contentcuration.ContentNode')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ( + "content_id", + contentcuration.models.UUIDField( + default=uuid.uuid4, editable=False, max_length=32 + ), + ), + ("title", models.CharField(max_length=200)), + ("description", models.CharField(blank=True, max_length=400)), + ( + "sort_order", + models.FloatField( + default=0, + help_text="Ascending, lowest number shown first", + max_length=50, 
+ verbose_name="sort order", + ), + ), + ( + "license_owner", + models.CharField( + blank=True, + help_text="Organization of person who holds the essential rights", + max_length=200, + ), + ), + ( + "author", + models.CharField( + blank=True, + help_text="Person who created content", + max_length=200, + ), + ), + ( + "created", + models.DateTimeField(auto_now_add=True, verbose_name="created"), + ), + ( + "modified", + models.DateTimeField(auto_now=True, verbose_name="modified"), + ), + ("changed", models.BooleanField(default=True)), + ("lft", models.PositiveIntegerField(db_index=True, editable=False)), + ("rght", models.PositiveIntegerField(db_index=True, editable=False)), + ("tree_id", models.PositiveIntegerField(db_index=True, editable=False)), + ("level", models.PositiveIntegerField(db_index=True, editable=False)), + ( + "cloned_source", + mptt.fields.TreeForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="clones", + to="contentcuration.ContentNode", + ), + ), ], options={ - 'verbose_name': 'Topic', - 'verbose_name_plural': 'Topics', + "verbose_name": "Topic", + "verbose_name_plural": "Topics", }, ), migrations.CreateModel( - name='ContentTag', + name="ContentTag", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('tag_name', models.CharField(max_length=30)), - ('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tags', to='contentcuration.Channel')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("tag_name", models.CharField(max_length=30)), + ( + "channel", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="tags", + to="contentcuration.Channel", + ), + ), ], ), migrations.CreateModel( - name='Exercise', + name="Exercise", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), ], ), migrations.CreateModel( - name='File', + name="File", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('checksum', models.CharField(blank=True, max_length=400)), - ('file_size', models.IntegerField(blank=True, null=True)), - ('file_on_disk', models.FileField(blank=True, max_length=500, - storage=contentcuration.models.FileOnDiskStorage(), upload_to=contentcuration.models.file_on_disk_name)), - ('original_filename', models.CharField(blank=True, max_length=255)), - ('contentnode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.ContentNode')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("checksum", models.CharField(blank=True, max_length=400)), + ("file_size", models.IntegerField(blank=True, null=True)), + ( + "file_on_disk", + models.FileField( + blank=True, + max_length=500, + storage=contentcuration.models.FileOnDiskStorage(), + upload_to=contentcuration.models.file_on_disk_name, + ), + ), + ("original_filename", models.CharField(blank=True, max_length=255)), + ( + "contentnode", + models.ForeignKey( + blank=True, + null=True, + 
on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.CreateModel( - name='FileFormat', + name="FileFormat", fields=[ - ('extension', models.CharField(choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), - ('mp3', 'mp3'), ('pdf', 'pdf')], max_length=40, primary_key=True, serialize=False)), - ('mimetype', models.CharField(blank=True, max_length=200)), + ( + "extension", + models.CharField( + choices=[ + ("mp4", "mp4"), + ("vtt", "vtt"), + ("srt", "srt"), + ("mp3", "mp3"), + ("pdf", "pdf"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + ("mimetype", models.CharField(blank=True, max_length=200)), ], ), migrations.CreateModel( - name='FormatPreset', + name="FormatPreset", fields=[ - ('id', models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ( - 'exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False)), - ('readable_name', models.CharField(max_length=400)), - ('multi_language', models.BooleanField(default=False)), - ('supplementary', models.BooleanField(default=False)), - ('order', models.IntegerField(default=0)), - ('allowed_formats', models.ManyToManyField(blank=True, to='contentcuration.FileFormat')), - ('kind', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='format_presets', to='contentcuration.ContentKind')), - ('thumbnail', models.BooleanField(default=False)), - ('display', models.BooleanField(default=True)), - ('subtitle', models.BooleanField(default=False)), + ( + "id", + models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("vector_video", "Vectorized"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), + ), + ("readable_name", models.CharField(max_length=400)), + ("multi_language", models.BooleanField(default=False)), + ("supplementary", models.BooleanField(default=False)), + ("order", models.IntegerField(default=0)), + ( + "allowed_formats", + models.ManyToManyField(blank=True, to="contentcuration.FileFormat"), + ), + ( + "kind", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="format_presets", + to="contentcuration.ContentKind", + ), + 
), + ("thumbnail", models.BooleanField(default=False)), + ("display", models.BooleanField(default=True)), + ("subtitle", models.BooleanField(default=False)), ], ), migrations.CreateModel( - name='Invitation', + name="Invitation", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('email', models.EmailField(max_length=100, null=True)), - ('first_name', models.CharField(default='Guest', max_length=100)), - ('last_name', models.CharField(blank=True, max_length=100, null=True)), - ('channel', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pending_editors', to='contentcuration.Channel')), - ('invited', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sent_to', to=settings.AUTH_USER_MODEL)), - ('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sent_by', to=settings.AUTH_USER_MODEL)), - ('share_mode', models.CharField(default='edit', max_length=50)), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("email", models.EmailField(max_length=100, null=True)), + ("first_name", models.CharField(default="Guest", max_length=100)), + ("last_name", models.CharField(blank=True, max_length=100, null=True)), + ( + "channel", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="pending_editors", + to="contentcuration.Channel", + ), + ), + ( + "invited", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="sent_to", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "sender", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="sent_by", + to=settings.AUTH_USER_MODEL, + ), + ), + ("share_mode", models.CharField(default="edit", max_length=50)), ], options={ - 'verbose_name': 'Invitation', - 'verbose_name_plural': 'Invitations', + "verbose_name": "Invitation", + "verbose_name_plural": "Invitations", }, ), migrations.CreateModel( - name='Language', + name="Language", fields=[ - ('id', models.CharField(max_length=14, primary_key=True, serialize=False)), - ('lang_code', models.CharField(db_index=True, max_length=3)), - ('lang_subcode', models.CharField(blank=True, db_index=True, max_length=10, null=True)), - ('readable_name', models.CharField(blank=True, max_length=100)), - ('native_name', models.CharField(blank=True, max_length=100)), - ('lang_direction', models.CharField(choices=[('ltr', 'Left to Right'), ('rtl', 'Right to Left')], default='ltr', max_length=3)), + ( + "id", + models.CharField(max_length=14, primary_key=True, serialize=False), + ), + ("lang_code", models.CharField(db_index=True, max_length=3)), + ( + "lang_subcode", + models.CharField( + blank=True, db_index=True, max_length=10, null=True + ), + ), + ("readable_name", models.CharField(blank=True, max_length=100)), + ("native_name", models.CharField(blank=True, max_length=100)), + ( + "lang_direction", + models.CharField( + choices=[("ltr", "Left to Right"), ("rtl", "Right to Left")], + default="ltr", + max_length=3, + ), + ), ], ), migrations.CreateModel( - name='License', + name="License", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('license_name', models.CharField(max_length=50)), - ('license_url', models.URLField(blank=True)), - ('license_description', 
models.TextField(blank=True)), - ('exists', models.BooleanField(default=False, help_text='Tells whether or not a content item is licensed to share', verbose_name='license exists')), - ('copyright_holder_required', models.BooleanField(default=True)), - ('is_custom', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("license_name", models.CharField(max_length=50)), + ("license_url", models.URLField(blank=True)), + ("license_description", models.TextField(blank=True)), + ( + "exists", + models.BooleanField( + default=False, + help_text="Tells whether or not a content item is licensed to share", + verbose_name="license exists", + ), + ), + ("copyright_holder_required", models.BooleanField(default=True)), + ("is_custom", models.BooleanField(default=False)), ], ), migrations.CreateModel( - name='PrerequisiteContentRelationship', + name="PrerequisiteContentRelationship", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('prerequisite', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_prerequisitecontentrelationship_prerequisite', to='contentcuration.ContentNode')), - ('target_node', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_prerequisitecontentrelationship_target_node', to='contentcuration.ContentNode')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "prerequisite", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_prerequisitecontentrelationship_prerequisite", + to="contentcuration.ContentNode", + ), + ), + ( + "target_node", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_prerequisitecontentrelationship_target_node", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.CreateModel( - name='RelatedContentRelationship', + name="RelatedContentRelationship", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('contentnode_1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_relatedcontentrelationship_1', to='contentcuration.ContentNode')), - ('contentnode_2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_relatedcontentrelationship_2', to='contentcuration.ContentNode')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "contentnode_1", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_relatedcontentrelationship_1", + to="contentcuration.ContentNode", + ), + ), + ( + "contentnode_2", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_relatedcontentrelationship_2", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.AddField( - model_name='file', - name='file_format', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.FileFormat'), - ), - migrations.AddField( - model_name='file', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', 
to='contentcuration.Language'), - ), - migrations.AddField( - model_name='file', - name='preset', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='files', to='contentcuration.FormatPreset'), - ), - migrations.AddField( - model_name='contentnode', - name='is_related', - field=models.ManyToManyField(blank=True, related_name='relate_to', - through='contentcuration.RelatedContentRelationship', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='kind', - field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contentnodes', to='contentcuration.ContentKind'), - ), - migrations.AddField( - model_name='contentnode', - name='license', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'), - ), - migrations.AddField( - model_name='contentnode', - name='original_node', - field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, - related_name='duplicates', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='parent', - field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='children', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='prerequisite', - field=models.ManyToManyField(blank=True, related_name='is_prerequisite_of', - through='contentcuration.PrerequisiteContentRelationship', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='tags', - field=models.ManyToManyField(blank=True, related_name='tagged_content', to='contentcuration.ContentTag'), - ), - migrations.AddField( - model_name='channel', - name='clipboard_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_clipboard', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='channel', - name='editors', - field=models.ManyToManyField(blank=True, help_text='Users with edit rights', related_name='editable_channels', - to=settings.AUTH_USER_MODEL, verbose_name='editors'), - ), - migrations.AddField( - model_name='channel', - name='main_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_main', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='channel', - name='trash_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_trash', to='contentcuration.ContentNode'), + model_name="file", + name="file_format", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.FileFormat", + ), + ), + migrations.AddField( + model_name="file", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.Language", + ), + ), + migrations.AddField( + model_name="file", + name="preset", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.FormatPreset", + ), + ), + migrations.AddField( + model_name="contentnode", + name="is_related", + field=models.ManyToManyField( + blank=True, + 
related_name="relate_to", + through="contentcuration.RelatedContentRelationship", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="kind", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentnodes", + to="contentcuration.ContentKind", + ), + ), + migrations.AddField( + model_name="contentnode", + name="license", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.License", + ), + ), + migrations.AddField( + model_name="contentnode", + name="original_node", + field=mptt.fields.TreeForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="duplicates", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="parent", + field=mptt.fields.TreeForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="children", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="prerequisite", + field=models.ManyToManyField( + blank=True, + related_name="is_prerequisite_of", + through="contentcuration.PrerequisiteContentRelationship", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="tags", + field=models.ManyToManyField( + blank=True, + related_name="tagged_content", + to="contentcuration.ContentTag", + ), + ), + migrations.AddField( + model_name="channel", + name="clipboard_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_clipboard", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="channel", + name="editors", + field=models.ManyToManyField( + blank=True, + help_text="Users with edit rights", + related_name="editable_channels", + to=settings.AUTH_USER_MODEL, + verbose_name="editors", + ), + ), + migrations.AddField( + model_name="channel", + name="main_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_main", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="channel", + name="trash_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_trash", + to="contentcuration.ContentNode", + ), ), migrations.AlterUniqueTogether( - name='relatedcontentrelationship', - unique_together=set([('contentnode_1', 'contentnode_2')]), + name="relatedcontentrelationship", + unique_together=set([("contentnode_1", "contentnode_2")]), ), migrations.AlterUniqueTogether( - name='prerequisitecontentrelationship', - unique_together=set([('target_node', 'prerequisite')]), + name="prerequisitecontentrelationship", + unique_together=set([("target_node", "prerequisite")]), ), migrations.AlterUniqueTogether( - name='contenttag', - unique_together=set([('tag_name', 'channel')]), + name="contenttag", + unique_together=set([("tag_name", "channel")]), ), migrations.AddField( - model_name='user', - name='clipboard_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='user_clipboard', to='contentcuration.ContentNode'), + model_name="user", + name="clipboard_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + 
related_name="user_clipboard", + to="contentcuration.ContentNode", + ), ), migrations.RenameField( - model_name='contentnode', - old_name='license_owner', - new_name='copyright_holder', + model_name="contentnode", + old_name="license_owner", + new_name="copyright_holder", ), migrations.AlterModelManagers( - name='contentnode', - managers=[ - ], + name="contentnode", + managers=[], ), migrations.AddField( - model_name='contentnode', - name='published', + model_name="contentnode", + name="published", field=models.BooleanField(default=False), ), migrations.AlterField( - model_name='contentnode', - name='sort_order', - field=models.FloatField(default=1, help_text='Ascending, lowest number shown first', max_length=50, verbose_name='sort order'), - ), - migrations.AddField( - model_name='file', - name='source_url', + model_name="contentnode", + name="sort_order", + field=models.FloatField( + default=1, + help_text="Ascending, lowest number shown first", + max_length=50, + verbose_name="sort order", + ), + ), + migrations.AddField( + model_name="file", + name="source_url", field=models.CharField(blank=True, max_length=400, null=True), ), migrations.AddField( - model_name='channel', - name='staging_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_staging', to='contentcuration.ContentNode'), + model_name="channel", + name="staging_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_staging", + to="contentcuration.ContentNode", + ), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', - 'Exercise'), ('document', 'Document')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), ('mp3', 'mp3'), ('wav', 'wav'), ('pdf', 'pdf'), - ('jpg', 'jpg'), ('jpeg', 'jpeg'), ('png', 'png'), ('perseus', 'perseus')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='contentnode', - name='node_id', - field=contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32), - ), - migrations.AddField( - model_name='channel', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_language', to='contentcuration.Language'), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "mp4"), + ("vtt", "vtt"), + ("srt", "srt"), + ("mp3", "mp3"), + ("wav", "wav"), + ("pdf", "pdf"), + ("jpg", "jpg"), + ("jpeg", "jpeg"), + ("png", "png"), + ("perseus", "perseus"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="contentnode", + name="node_id", + field=contentcuration.models.UUIDField( + default=uuid.uuid4, editable=False, max_length=32 + ), + ), + migrations.AddField( + model_name="channel", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + 
on_delete=django.db.models.deletion.CASCADE, + related_name="channel_language", + to="contentcuration.Language", + ), ), migrations.AlterField( - model_name='channel', - name='thumbnail', + model_name="channel", + name="thumbnail", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='exercise', - name='contentnode', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='exercise', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='exercise', - name='mastery_model', - field=models.CharField(choices=[('do_all', 'Do all'), ('num_correct_in_a_row_2', '2 in a row'), ('num_correct_in_a_row_10', '10 in a row'), ( - 'num_correct_in_a_row_3', '3 in a row'), ('num_correct_in_a_row_5', '5 in a row'), ('skill_check', 'Skill check'), ('m_of_n', 'M out of N')], default='do_all', max_length=200), - ), - migrations.AddField( - model_name='assessmentitem', - name='contentnode', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='assessment_items', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='extra_fields', + model_name="exercise", + name="contentnode", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="exercise", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="exercise", + name="mastery_model", + field=models.CharField( + choices=[ + ("do_all", "Do all"), + ("num_correct_in_a_row_2", "2 in a row"), + ("num_correct_in_a_row_10", "10 in a row"), + ("num_correct_in_a_row_3", "3 in a row"), + ("num_correct_in_a_row_5", "5 in a row"), + ("skill_check", "Skill check"), + ("m_of_n", "M out of N"), + ], + default="do_all", + max_length=200, + ), + ), + migrations.AddField( + model_name="assessmentitem", + name="contentnode", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="assessment_items", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="extra_fields", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='assessmentitem', - name='order', + model_name="assessmentitem", + name="order", field=models.IntegerField(default=1), ), migrations.AddField( - model_name='assessmentitem', - name='assessment_id', - field=contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32), + model_name="assessmentitem", + name="assessment_id", + field=contentcuration.models.UUIDField( + default=uuid.uuid4, editable=False, max_length=32 + ), ), migrations.AlterField( - model_name='contentnode', - name='license', - field=models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'), - ), - migrations.AddField( - model_name='assessmentitem', - name='raw_data', + model_name="contentnode", + name="license", + field=models.ForeignKey( + default=1, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.License", + ), + ), + migrations.AddField( + model_name="assessmentitem", + name="raw_data", field=models.TextField(blank=True), ), migrations.AddField( - model_name='assessmentitem', - name='hints', - field=models.TextField(default='[]'), + model_name="assessmentitem", + name="hints", + field=models.TextField(default="[]"), ), migrations.AlterField( - model_name='fileformat', - name='extension', - 
field=models.CharField(choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), ('mp3', 'mp3'), ('wav', 'wav'), ('pdf', 'pdf'), ('jpg', 'jpg'), ( - 'jpeg', 'jpeg'), ('png', 'png'), ('json', 'json'), ('svg', 'svg'), ('perseus', 'perseus')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "mp4"), + ("vtt", "vtt"), + ("srt", "srt"), + ("mp3", "mp3"), + ("wav", "wav"), + ("pdf", "pdf"), + ("jpg", "jpg"), + ("jpeg", "jpeg"), + ("png", "png"), + ("json", "json"), + ("svg", "svg"), + ("perseus", "perseus"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.RemoveField( - model_name='contentnode', - name='author', - ), - migrations.AddField( - model_name='contentnode', - name='author', - field=models.CharField(blank=True, default='', help_text='Who created this content?', max_length=200, null=True), - ), - migrations.AddField( - model_name='file', - name='assessment_item', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='files', to='contentcuration.AssessmentItem'), - ), - migrations.AddField( - model_name='channel', - name='previous_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_previous', to='contentcuration.ContentNode'), + model_name="contentnode", + name="author", + ), + migrations.AddField( + model_name="contentnode", + name="author", + field=models.CharField( + blank=True, + default="", + help_text="Who created this content?", + max_length=200, + null=True, + ), + ), + migrations.AddField( + model_name="file", + name="assessment_item", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.AssessmentItem", + ), + ), + migrations.AddField( + model_name="channel", + name="previous_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_previous", + to="contentcuration.ContentNode", + ), ), migrations.AlterField( - model_name='channel', - name='name', + model_name="channel", + name="name", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), - ('document', 'Document'), ('html5', 'HTML5 App')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('wav', 'WAV Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ( - 'jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='channel', - name='viewers', - field=models.ManyToManyField(blank=True, help_text='Users with view only rights', related_name='view_only_channels', - 
to=settings.AUTH_USER_MODEL, verbose_name='viewers'), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("wav", "WAV Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="channel", + name="viewers", + field=models.ManyToManyField( + blank=True, + help_text="Users with view only rights", + related_name="view_only_channels", + to=settings.AUTH_USER_MODEL, + verbose_name="viewers", + ), ), migrations.AlterField( - model_name='channel', - name='name', + model_name="channel", + name="name", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), - ('document', 'Document'), ('html5', 'HTML5 App')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='contentnode', - name='description', + model_name="contentnode", + name="description", field=models.TextField(blank=True), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('wav', 'WAV Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ( - 'jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("wav", "WAV Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='channel', - name='name', + model_name="channel", + name="name", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), - ('document', 'Document'), ('html5', 'HTML5 App')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), 
('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('wav', 'WAV Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ( - 'jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='contentnode', - name='original_channel_id', - field=contentcuration.models.UUIDField(db_index=True, editable=False, max_length=32, null=True), - ), - migrations.AddField( - model_name='contentnode', - name='source_channel_id', - field=contentcuration.models.UUIDField(editable=False, max_length=32, null=True), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("wav", "WAV Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="contentnode", + name="original_channel_id", + field=contentcuration.models.UUIDField( + db_index=True, editable=False, max_length=32, null=True + ), + ), + migrations.AddField( + model_name="contentnode", + name="source_channel_id", + field=contentcuration.models.UUIDField( + editable=False, max_length=32, null=True + ), ), migrations.AlterField( - model_name='contentnode', - name='copyright_holder', - field=models.CharField(blank=True, default='', help_text='Organization of person who holds the essential rights', max_length=200), + model_name="contentnode", + name="copyright_holder", + field=models.CharField( + blank=True, + default="", + help_text="Organization of person who holds the essential rights", + max_length=200, + ), ), migrations.AddField( - model_name='contentnode', - name='original_source_node_id', - field=contentcuration.models.UUIDField(db_index=True, editable=False, max_length=32, null=True), + model_name="contentnode", + name="original_source_node_id", + field=contentcuration.models.UUIDField( + db_index=True, editable=False, max_length=32, null=True + ), ), migrations.AddField( - model_name='contentnode', - name='source_node_id', - field=contentcuration.models.UUIDField(editable=False, max_length=32, null=True), + model_name="contentnode", + name="source_node_id", + field=contentcuration.models.UUIDField( + editable=False, max_length=32, null=True + ), ), migrations.AddField( - model_name='assessmentitem', - name='source_url', + model_name="assessmentitem", + name="source_url", field=models.CharField(blank=True, max_length=400, null=True), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ( - 'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='channel', - name='ricecooker_version', + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG 
Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="channel", + name="ricecooker_version", field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AddField( - model_name='channel', - name='source_domain', + model_name="channel", + name="source_domain", field=models.CharField(blank=True, max_length=300, null=True), ), migrations.AddField( - model_name='channel', - name='source_id', + model_name="channel", + name="source_id", field=models.CharField(blank=True, max_length=200, null=True), ), migrations.AddField( - model_name='contentnode', - name='source_domain', + model_name="contentnode", + name="source_domain", field=models.CharField(blank=True, max_length=300, null=True), ), migrations.AddField( - model_name='contentnode', - name='source_id', + model_name="contentnode", + name="source_id", field=models.CharField(blank=True, max_length=200, null=True), ), migrations.AddField( - model_name='assessmentitem', - name='randomize', + model_name="assessmentitem", + name="randomize", field=models.BooleanField(default=False), ), migrations.AlterField( - model_name='channel', - name='deleted', + model_name="channel", + name="deleted", field=models.BooleanField(db_index=True, default=False), ), migrations.AlterField( - model_name='channel', - name='public', + model_name="channel", + name="public", field=models.BooleanField(db_index=True, default=False), ), migrations.AlterField( - model_name='contentnode', - name='changed', + model_name="contentnode", + name="changed", field=models.BooleanField(db_index=True, default=True), ), migrations.AlterField( - model_name='file', - name='checksum', + model_name="file", + name="checksum", field=models.CharField(blank=True, db_index=True, max_length=400), ), migrations.AddField( - model_name='assessmentitem', - name='deleted', + model_name="assessmentitem", + name="deleted", field=models.BooleanField(default=False), ), migrations.CreateModel( - name='ChannelResourceSize', + name="ChannelResourceSize", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('tree_id', models.IntegerField()), - ('resource_size', models.IntegerField()), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("tree_id", models.IntegerField()), + ("resource_size", models.IntegerField()), ], options={ - 'db_table': 'contentcuration_channel_resource_sizes', - 'managed': False, + "db_table": "contentcuration_channel_resource_sizes", + "managed": False, }, ), migrations.CreateModel( - name='ChannelResourceSize', + name="ChannelResourceSize", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('tree_id', models.IntegerField()), - ('resource_size', models.IntegerField()), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("tree_id", models.IntegerField()), + ("resource_size", models.IntegerField()), ], options={ - 'db_table': 'contentcuration_channel_resource_sizes', - 'managed': False, + "db_table": "contentcuration_channel_resource_sizes", + "managed": False, }, ), migrations.AddField( - model_name='user', - name='preferences', - 
field=models.TextField(default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}'), + model_name="user", + name="preferences", + field=models.TextField( + default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}' + ), ), migrations.AddField( - model_name='contentnode', - name='license_description', + model_name="contentnode", + name="license_description", field=models.CharField(blank=True, max_length=400, null=True), ), migrations.AlterField( - model_name='contentnode', - name='copyright_holder', - field=models.CharField(blank=True, default='', help_text='Organization of person who holds the essential rights', max_length=200, null=True), - ), - migrations.AddField( - model_name='user', - name='date_joined', - field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined'), - ), - migrations.AddField( - model_name='user', - name='groups', - field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', - related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'), - ), - migrations.AddField( - model_name='user', - name='is_staff', - field=models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status'), - ), - migrations.AddField( - model_name='user', - name='is_superuser', + model_name="contentnode", + name="copyright_holder", + field=models.CharField( + blank=True, + default="", + help_text="Organization of person who holds the essential rights", + max_length=200, + null=True, + ), + ), + migrations.AddField( + model_name="user", + name="date_joined", + field=models.DateTimeField( + default=django.utils.timezone.now, verbose_name="date joined" + ), + ), + migrations.AddField( + model_name="user", + name="groups", + field=models.ManyToManyField( + blank=True, + help_text="The groups this user belongs to. 
A user will get all permissions granted to each of their groups.", + related_name="user_set", + related_query_name="user", + to="auth.Group", + verbose_name="groups", + ), + ), + migrations.AddField( + model_name="user", + name="is_staff", field=models.BooleanField( - default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'), + default=False, + help_text="Designates whether the user can log into this admin site.", + verbose_name="staff status", + ), ), migrations.AddField( - model_name='user', - name='user_permissions', - field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', - related_query_name='user', to='auth.Permission', verbose_name='user permissions'), + model_name="user", + name="is_superuser", + field=models.BooleanField( + default=False, + help_text="Designates that this user has all permissions without explicitly assigning them.", + verbose_name="superuser status", + ), + ), + migrations.AddField( + model_name="user", + name="user_permissions", + field=models.ManyToManyField( + blank=True, + help_text="Specific permissions for this user.", + related_name="user_set", + related_query_name="user", + to="auth.Permission", + verbose_name="user permissions", + ), ), migrations.AlterField( - model_name='user', - name='is_active', - field=models.BooleanField(default=False, help_text='Designates whether this user should be treated as active.', verbose_name='active'), + model_name="user", + name="is_active", + field=models.BooleanField( + default=False, + help_text="Designates whether this user should be treated as active.", + verbose_name="active", + ), ), migrations.AddField( - model_name='channel', - name='chef_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_chef', to='contentcuration.ContentNode'), + model_name="channel", + name="chef_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_chef", + to="contentcuration.ContentNode", + ), ), migrations.AddField( - model_name='channel', - name='preferences', - field=models.TextField(default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}'), + model_name="channel", + name="preferences", + field=models.TextField( + default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}' + ), ), migrations.AddField( - model_name='channel', - name='thumbnail_encoding', + model_name="channel", + name="thumbnail_encoding", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='thumbnail_encoding', + model_name="contentnode", + 
name="thumbnail_encoding", field=models.TextField(blank=True, null=True), ), migrations.AlterField( - model_name='contentnode', - name='content_id', - field=contentcuration.models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, max_length=32), - ), - migrations.AddField( - model_name='contentnode', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='content_language', to='contentcuration.Language'), + model_name="contentnode", + name="content_id", + field=contentcuration.models.UUIDField( + db_index=True, default=uuid.uuid4, editable=False, max_length=32 + ), + ), + migrations.AddField( + model_name="contentnode", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="content_language", + to="contentcuration.Language", + ), ), migrations.CreateModel( - name='SecretToken', + name="SecretToken", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('token', models.CharField(max_length=100, unique=True)), - ('is_primary', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("token", models.CharField(max_length=100, unique=True)), + ("is_primary", models.BooleanField(default=False)), ], ), migrations.AddField( - model_name='channel', - name='secret_tokens', - field=models.ManyToManyField(blank=True, related_name='channels', to='contentcuration.SecretToken', verbose_name='secret tokens'), + model_name="channel", + name="secret_tokens", + field=models.ManyToManyField( + blank=True, + related_name="channels", + to="contentcuration.SecretToken", + verbose_name="secret tokens", + ), ), migrations.AddField( - model_name='channel', - name='priority', - field=models.IntegerField(default=0, help_text='Order to display public channels'), + model_name="channel", + name="priority", + field=models.IntegerField( + default=0, help_text="Order to display public channels" + ), ), migrations.CreateModel( - name='StagedFile', + name="StagedFile", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('checksum', models.CharField(blank=True, db_index=True, max_length=400)), - ('file_size', models.IntegerField(blank=True, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "checksum", + models.CharField(blank=True, db_index=True, max_length=400), + ), + ("file_size", models.IntegerField(blank=True, null=True)), ], ), migrations.AddField( - model_name='file', - name='uploaded_by', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to=settings.AUTH_USER_MODEL), + model_name="file", + name="uploaded_by", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to=settings.AUTH_USER_MODEL, + ), ), migrations.AddField( - model_name='user', - name='disk_space', - field=models.FloatField(default=524288000, help_text='How many bytes a user can upload'), + model_name="user", + name="disk_space", + field=models.FloatField( + default=524288000, help_text="How many bytes a user can upload" + ), ), migrations.AddField( - model_name='stagedfile', - name='uploaded_by', - field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, - related_name='staged_files', to=settings.AUTH_USER_MODEL), + model_name="stagedfile", + name="uploaded_by", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="staged_files", + to=settings.AUTH_USER_MODEL, + ), ), migrations.AddField( - model_name='contentnode', - name='freeze_authoring_data', + model_name="contentnode", + name="freeze_authoring_data", field=models.BooleanField(default=False), ), migrations.AlterField( - model_name='contentnode', - name='license', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'), - ), - migrations.AddField( - model_name='channel', - name='icon_encoding', + model_name="contentnode", + name="license", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.License", + ), + ), + migrations.AddField( + model_name="channel", + name="icon_encoding", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='channel', - name='last_published', + model_name="channel", + name="last_published", field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( - model_name='channel', - name='included_languages', - field=models.ManyToManyField(blank=True, related_name='channels', to='contentcuration.Language', verbose_name='languages'), + model_name="channel", + name="included_languages", + field=models.ManyToManyField( + blank=True, + related_name="channels", + to="contentcuration.Language", + verbose_name="languages", + ), ), migrations.AddField( - model_name='channel', - name='published_kind_count', + model_name="channel", + name="published_kind_count", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='channel', - name='published_size', + model_name="channel", + name="published_size", field=models.FloatField(default=0), ), migrations.AddField( - model_name='channel', - name='total_resource_count', + model_name="channel", + name="total_resource_count", field=models.IntegerField(default=0), ), migrations.AddField( - model_name='contentnode', - name='publishing', + model_name="contentnode", + name="publishing", field=models.BooleanField(default=False), ), migrations.AddField( - model_name='user', - name='information', + model_name="user", + name="information", field=django.contrib.postgres.fields.jsonb.JSONField(null=True), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ( - 'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - 
model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ( - 'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='channel', - name='content_defaults', + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="channel", + name="content_defaults", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), migrations.AlterIndexTogether( - name='channel', - index_together=set([('deleted', 'public')]), + name="channel", + index_together=set([("deleted", "public")]), ), migrations.AddField( - model_name='contentnode', - name='role_visibility', - field=models.CharField(choices=[('coach', 'Coach'), ('learner', 'Learner')], default='learner', max_length=50), + model_name="contentnode", + name="role_visibility", + field=models.CharField( + choices=[("coach", "Coach"), ("learner", "Learner")], + default="learner", + max_length=50, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ( - 'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='user', - name='content_defaults', + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="user", + name="content_defaults", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), migrations.AddField( - model_name='user', - name='policies', - field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True), + model_name="user", + name="policies", + field=django.contrib.postgres.fields.jsonb.JSONField( + default=dict, null=True + ), ), migrations.AddField( - model_name='contentnode', - name='aggregator', - field=models.CharField(blank=True, default='', help_text='Who gathered this content together?', max_length=200, null=True), + model_name="contentnode", + name="aggregator", + field=models.CharField( + blank=True, + default="", + help_text="Who 
gathered this content together?", + max_length=200, + null=True, + ), ), migrations.AddField( - model_name='contentnode', - name='provider', - field=models.CharField(blank=True, default='', help_text='Who distributed this content?', max_length=200, null=True), + model_name="contentnode", + name="provider", + field=models.CharField( + blank=True, + default="", + help_text="Who distributed this content?", + max_length=200, + null=True, + ), ), migrations.AlterField( - model_name='contentnode', - name='changed', + model_name="contentnode", + name="changed", field=models.BooleanField(default=True), ), migrations.AlterField( - model_name='file', - name='file_on_disk', - field=models.FileField(blank=True, max_length=500, upload_to=contentcuration.models.object_storage_name), + model_name="file", + name="file_on_disk", + field=models.FileField( + blank=True, + max_length=500, + upload_to=contentcuration.models.object_storage_name, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ( - 'json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.CreateModel( - name='ChannelSet', + name="ChannelSet", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('name', models.CharField(blank=True, max_length=200)), - ('description', models.CharField(blank=True, max_length=400)), - ('public', models.BooleanField(db_index=True, default=False)), - ('editors', models.ManyToManyField(blank=True, help_text='Users with edit rights', related_name='channel_sets', to=settings.AUTH_USER_MODEL, verbose_name='editors')), - ('secret_token', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_sets', to='contentcuration.SecretToken')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("name", models.CharField(blank=True, max_length=200)), + ("description", models.CharField(blank=True, max_length=400)), + ("public", models.BooleanField(db_index=True, default=False)), + ( + "editors", + models.ManyToManyField( + blank=True, + help_text="Users with edit rights", + related_name="channel_sets", + to=settings.AUTH_USER_MODEL, + verbose_name="editors", + ), + ), + ( + "secret_token", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_sets", + to="contentcuration.SecretToken", + ), + ), ], ), ] diff --git a/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py 
b/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py index 919b892d9f..f01730b555 100644 --- a/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py +++ b/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-12-20 17:34 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb from django.db import migrations @@ -9,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0001_squashed_0094_auto_20180910_2342'), + ("contentcuration", "0001_squashed_0094_auto_20180910_2342"), ] operations = [ migrations.AddField( - model_name='channel', - name='thumbnail_encoding_json', + model_name="channel", + name="thumbnail_encoding_json", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), ] diff --git a/contentcuration/contentcuration/migrations/0003_copy_data.py b/contentcuration/contentcuration/migrations/0003_copy_data.py index 402b993dd8..e9f073b424 100644 --- a/contentcuration/contentcuration/migrations/0003_copy_data.py +++ b/contentcuration/contentcuration/migrations/0003_copy_data.py @@ -1,31 +1,37 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-12-20 17:34 -from __future__ import unicode_literals - -from django.db import migrations - import ast import json +from django.db import migrations + def forwards(apps, schema_editor): - Channel = apps.get_model('contentcuration', 'channel') + Channel = apps.get_model("contentcuration", "channel") for channel in Channel.objects.all(): - channel.thumbnail_encoding_json = ast.literal_eval(channel.thumbnail_encoding) if channel.thumbnail_encoding else {} + channel.thumbnail_encoding_json = ( + ast.literal_eval(channel.thumbnail_encoding) + if channel.thumbnail_encoding + else {} + ) channel.save() def backwards(apps, schema_editor): - Channel = apps.get_model('contentcuration', 'channel') + Channel = apps.get_model("contentcuration", "channel") for channel in Channel.objects.all(): - channel.thumbnail_encoding = json.dumps(channel.thumbnail_encoding_json) if channel.thumbnail_encoding_json else None + channel.thumbnail_encoding = ( + json.dumps(channel.thumbnail_encoding_json) + if channel.thumbnail_encoding_json + else None + ) channel.save() class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0002_auto_20181220_1734'), + ("contentcuration", "0002_auto_20181220_1734"), ] operations = [ diff --git a/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py b/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py index 1504437609..d840b2196d 100644 --- a/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py +++ b/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py @@ -1,21 +1,19 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-12-20 17:34 -from __future__ import unicode_literals - from django.db import migrations class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0003_copy_data'), + ("contentcuration", "0003_copy_data"), ] operations = [ - migrations.RemoveField(model_name='channel', name='thumbnail_encoding'), + migrations.RemoveField(model_name="channel", name="thumbnail_encoding"), migrations.RenameField( - model_name='channel', - old_name='thumbnail_encoding_json', - new_name='thumbnail_encoding', + model_name="channel", + old_name="thumbnail_encoding_json", + 
new_name="thumbnail_encoding", ), ] diff --git a/contentcuration/contentcuration/migrations/0097_task.py b/contentcuration/contentcuration/migrations/0097_task.py index 5e411b2076..3118d19c25 100644 --- a/contentcuration/contentcuration/migrations/0097_task.py +++ b/contentcuration/contentcuration/migrations/0097_task.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-02-28 20:19 -from __future__ import unicode_literals - import uuid import django.contrib.postgres.fields.jsonb @@ -16,26 +14,71 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0004_remove_rename_json_field'), + ("contentcuration", "0004_remove_rename_json_field"), ] operations = [ migrations.CreateModel( - name='Task', + name="Task", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('task_type', models.CharField(max_length=50)), - ('created', models.DateTimeField(default=django.utils.timezone.now)), - ('status', models.CharField(max_length=10)), - ('is_progress_tracking', models.BooleanField(default=False)), - ('metadata', django.contrib.postgres.fields.jsonb.JSONField()), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task', to=settings.AUTH_USER_MODEL)), - ('task_id', contentcuration.models.UUIDField(db_index=True, default=uuid.uuid4, max_length=32)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("task_type", models.CharField(max_length=50)), + ("created", models.DateTimeField(default=django.utils.timezone.now)), + ("status", models.CharField(max_length=10)), + ("is_progress_tracking", models.BooleanField(default=False)), + ("metadata", django.contrib.postgres.fields.jsonb.JSONField()), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="task", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "task_id", + contentcuration.models.UUIDField( + db_index=True, default=uuid.uuid4, max_length=32 + ), + ), ], ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + 
("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py b/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py index 8ad3980ffc..0be055421e 100644 --- a/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py +++ b/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-04-24 17:09 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb import django.db.models.deletion from django.db import migrations @@ -11,32 +9,99 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0097_task'), + ("contentcuration", "0097_task"), ] operations = [ migrations.CreateModel( - name='SlideshowSlide', + name="SlideshowSlide", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('sort_order', models.FloatField(default=1.0)), - ('metadata', django.contrib.postgres.fields.jsonb.JSONField(default={})), - ('contentnode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='slideshow_slides', to='contentcuration.ContentNode')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("sort_order", models.FloatField(default=1.0)), + ( + "metadata", + django.contrib.postgres.fields.jsonb.JSONField(default={}), + ), + ( + "contentnode", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="slideshow_slides", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ("slideshow", "Slideshow"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + 
model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), migrations.AddField( - model_name='file', - name='slideshow_slide', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.SlideshowSlide'), + model_name="file", + name="slideshow_slide", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.SlideshowSlide", + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py b/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py index 95a18ed834..9e66f0c52c 100644 --- a/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py +++ b/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-07-15 22:01 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0098_auto_20190424_1709'), + ("contentcuration", "0098_auto_20190424_1709"), ] operations = [ migrations.AlterField( - model_name='contenttag', - name='tag_name', + model_name="contenttag", + name="tag_name", field=models.CharField(max_length=50), ), ] diff --git a/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py b/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py index 62d7c6095b..426c4ec4ca 100644 --- a/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py +++ b/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-06-12 00:00 -from __future__ import unicode_literals - from datetime import datetime from django.db import migrations @@ -13,7 +11,9 @@ def calculate_included_languages(apps, schema_editor): Channel = apps.get_model("contentcuration", "Channel") ContentNode = apps.get_model("contentcuration", "ContentNode") - for channel in Channel.objects.filter(main_tree__isnull=False, last_published__lt=included_languages_deploy_date): + for channel in Channel.objects.filter( + main_tree__isnull=False, last_published__lt=included_languages_deploy_date + ): content_nodes = ContentNode.objects.filter( tree_id=channel.main_tree.tree_id, published=True, @@ -37,7 +37,7 @@ def calculate_included_languages(apps, schema_editor): class Migration(migrations.Migration): 
dependencies = [ - ('contentcuration', '0099_auto_20190715_2201'), + ("contentcuration", "0099_auto_20190715_2201"), ] operations = [ diff --git a/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py b/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py index 23ba8a20a2..9791ecc788 100644 --- a/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py +++ b/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Semi-automatically generated by Micah 1.11.20 on 2019-04-24 23:05 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb from django.db import connection from django.db import migrations @@ -9,22 +7,23 @@ from contentcuration.models import ContentNode + class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0100_calculate_included_languages'), + ("contentcuration", "0100_calculate_included_languages"), ] operations = [ - migrations.RunSQL( # converts the extra_fields column from text to jsonb - "ALTER TABLE %s ALTER COLUMN extra_fields TYPE jsonb USING extra_fields::json;" % ContentNode._meta.db_table, + "ALTER TABLE %s ALTER COLUMN extra_fields TYPE jsonb USING extra_fields::json;" + % ContentNode._meta.db_table, # keeps the Django model in sync with the database state_operations=[ migrations.AlterField( - 'contentnode', - 'extra_fields', + "contentnode", + "extra_fields", django.contrib.postgres.fields.jsonb.JSONField(), ), ], @@ -34,12 +33,13 @@ class Migration(migrations.Migration): # as otherwise pre-conversion migration tests can fail if we allow null. reverse_sql="""ALTER TABLE %s ALTER COLUMN extra_fields TYPE text USING extra_fields #>> '{}'; ALTER TABLE %s ALTER COLUMN extra_fields DROP NOT NULL; - """ % (ContentNode._meta.db_table, ContentNode._meta.db_table), + """ + % (ContentNode._meta.db_table, ContentNode._meta.db_table), ), - # This is to update `ContentNode` entries with `extra_fields=="null"` to actual NULL values migrations.RunSQL( - "UPDATE %s SET extra_fields=NULL WHERE extra_fields = 'null'" % ContentNode._meta.db_table, - migrations.RunSQL.noop # don't bother to reverse this - ) + "UPDATE %s SET extra_fields=NULL WHERE extra_fields = 'null'" + % ContentNode._meta.db_table, + migrations.RunSQL.noop, # don't bother to reverse this + ), ] diff --git a/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py b/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py index e22681cc85..856f183a63 100644 --- a/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py +++ b/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-09-04 16:27 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb from django.db import migrations from django.db import models @@ -10,13 +8,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0101_extra_fields_json_field'), + ("contentcuration", "0101_extra_fields_json_field"), ] operations = [ migrations.AlterField( - model_name='contentnode', - name='extra_fields', + model_name="contentnode", + name="extra_fields", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), ] diff --git a/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py b/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py index 3adf2ba00b..e60e71999a 
100644 --- a/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py +++ b/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-09-05 04:08 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,18 +7,69 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0102_auto_20190904_1627'), + ("contentcuration", "0102_auto_20190904_1627"), ] operations = [ migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), 
+ ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py b/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py index bb7a1bf983..8ec05b3733 100644 --- a/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py +++ b/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-10-28 23:25 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,13 +7,42 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0103_auto_20190905_0408'), + ("contentcuration", "0103_auto_20190905_0408"), ] operations = [ migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0105_channel_published_data.py b/contentcuration/contentcuration/migrations/0105_channel_published_data.py index 8ac4104871..d7dd241958 100644 --- a/contentcuration/contentcuration/migrations/0105_channel_published_data.py +++ b/contentcuration/contentcuration/migrations/0105_channel_published_data.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-11-13 02:17 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb from django.db import migrations @@ -9,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0104_auto_20191028_2325'), + 
("contentcuration", "0104_auto_20191028_2325"), ] operations = [ migrations.AddField( - model_name='channel', - name='published_data', + model_name="channel", + name="published_data", field=django.contrib.postgres.fields.jsonb.JSONField(null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py b/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py index 00358a6dde..c964cbc6c9 100644 --- a/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py +++ b/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-11-13 02:17 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb from django.db import migrations @@ -9,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0105_channel_published_data'), + ("contentcuration", "0105_channel_published_data"), ] operations = [ migrations.AlterField( - model_name='channel', - name='published_data', + model_name="channel", + name="published_data", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), ] diff --git a/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py b/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py index cbcfa6ec92..ea2d48fc2e 100644 --- a/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py +++ b/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-11-15 23:44 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,24 +7,80 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0106_auto_20191113_0217'), + ("contentcuration", "0106_auto_20191113_0217"), ] operations = [ migrations.CreateModel( - name='MPTTTreeIDManager', + name="MPTTTreeIDManager", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), ], ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), 
('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py b/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py index 05bdd0024c..2eb02e8ad4 100644 --- a/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py +++ b/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -12,14 +10,19 @@ def delete_tree_id_records(apps, schema_editor): Note that this technically does not reverse the migration, as IDs are not re-used after deletion, but just returns the table to an empty state undoing the record creation. """ - MPTTTreeIDManager = apps.get_model('contentcuration', 'MPTTTreeIDManager') + MPTTTreeIDManager = apps.get_model("contentcuration", "MPTTTreeIDManager") MPTTTreeIDManager.objects.all().delete() def update_tree_id_integer(apps, schema_editor): - MPTTTreeIDManager = apps.get_model('contentcuration', 'MPTTTreeIDManager') + MPTTTreeIDManager = apps.get_model("contentcuration", "MPTTTreeIDManager") # In tests, we won't have any existing MPTT trees, so this will return None. 
- max_id = ContentNode.objects.filter(parent=None).aggregate(max_id=models.Max('tree_id'))['max_id'] or 0 + max_id = ( + ContentNode.objects.filter(parent=None).aggregate(max_id=models.Max("tree_id"))[ + "max_id" + ] + or 0 + ) objects = [] for i in range(max_id): objects.append(MPTTTreeIDManager()) @@ -31,9 +34,11 @@ def update_tree_id_integer(apps, schema_editor): class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0107_auto_20191115_2344'), + ("contentcuration", "0107_auto_20191115_2344"), ] operations = [ - migrations.RunPython(update_tree_id_integer, reverse_code=delete_tree_id_records), + migrations.RunPython( + update_tree_id_integer, reverse_code=delete_tree_id_records + ), ] diff --git a/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py b/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py index c925e8a638..6d0941d063 100644 --- a/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py +++ b/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-12-02 17:59 -from __future__ import unicode_literals - import django.contrib.postgres.fields.jsonb from django.db import migrations @@ -9,13 +7,15 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0108_mptt_tree_id_migration'), + ("contentcuration", "0108_mptt_tree_id_migration"), ] operations = [ migrations.AlterField( - model_name='contentnode', - name='extra_fields', - field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True), + model_name="contentnode", + name="extra_fields", + field=django.contrib.postgres.fields.jsonb.JSONField( + blank=True, default=dict, null=True + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py b/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py index 1ed1bdf269..77f297a38d 100644 --- a/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py +++ b/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.28 on 2020-05-11 22:45 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,18 +7,18 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0109_auto_20191202_1759'), + ("contentcuration", "0109_auto_20191202_1759"), ] operations = [ migrations.AddField( - model_name='channel', - name='demo_server_url', + model_name="channel", + name="demo_server_url", field=models.CharField(blank=True, max_length=200), ), migrations.AddField( - model_name='channel', - name='source_url', + model_name="channel", + name="source_url", field=models.CharField(blank=True, max_length=200), ), ] diff --git a/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py b/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py index 8a539c6319..022cfa3769 100644 --- a/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py +++ b/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2020-05-13 22:52 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,18 +7,18 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0110_auto_20200511_2245'), + ("contentcuration", 
"0110_auto_20200511_2245"), ] operations = [ migrations.AlterField( - model_name='channel', - name='demo_server_url', + model_name="channel", + name="demo_server_url", field=models.CharField(blank=True, max_length=200, null=True), ), migrations.AlterField( - model_name='channel', - name='source_url', + model_name="channel", + name="source_url", field=models.CharField(blank=True, max_length=200, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py b/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py index 9d352e0e84..8d11ae9c56 100644 --- a/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py +++ b/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py @@ -1,30 +1,94 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2020-06-13 00:50 -from __future__ import unicode_literals - -from django.db import migrations, models +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0111_auto_20200513_2252'), + ("contentcuration", "0111_auto_20200513_2252"), ] operations = [ migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow'), ('h5p', 'H5P')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ("slideshow", "Slideshow"), + ("h5p", "H5P"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), 
('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0113_channel_tagline.py b/contentcuration/contentcuration/migrations/0113_channel_tagline.py index bd2bde0014..142185faaa 100644 --- a/contentcuration/contentcuration/migrations/0113_channel_tagline.py +++ b/contentcuration/contentcuration/migrations/0113_channel_tagline.py @@ -1,20 +1,19 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2020-06-13 01:02 -from __future__ import unicode_literals - -from django.db import migrations, models +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0112_auto_20200613_0050'), + ("contentcuration", "0112_auto_20200613_0050"), ] operations = [ migrations.AddField( - model_name='channel', - name='tagline', + model_name="channel", + name="tagline", field=models.CharField(blank=True, max_length=150, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py b/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py index 1500d7531f..5da7fd6209 100644 --- a/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py +++ b/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py @@ -1,51 +1,49 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2021-01-11 20:55 -from __future__ import unicode_literals - from django.db import migrations import contentcuration.models -TABLE_NAME = 'contentcuration_assessmentitem' -INDEX_NAME = 'assessmentitem_unique_keypair' -CONSTRAINT_NAME = 'assessmentitem_unique_keypair_constraint' +TABLE_NAME = "contentcuration_assessmentitem" +INDEX_NAME = "assessmentitem_unique_keypair" +CONSTRAINT_NAME = "assessmentitem_unique_keypair_constraint" + class Migration(migrations.Migration): atomic = False dependencies = [ - ('contentcuration', '0113_channel_tagline'), + ("contentcuration", "0113_channel_tagline"), ] operations = [ migrations.SeparateDatabaseAndState( state_operations=[ migrations.AlterUniqueTogether( - name='assessmentitem', - 
unique_together=set([('contentnode', 'assessment_id')]), + name="assessmentitem", + unique_together=set([("contentnode", "assessment_id")]), ), ], database_operations=[ migrations.RunSQL( - sql='CREATE UNIQUE INDEX CONCURRENTLY {index_name} ON {table_name} USING btree (assessment_id, contentnode_id)'.format( + sql="CREATE UNIQUE INDEX CONCURRENTLY {index_name} ON {table_name} USING btree (assessment_id, contentnode_id)".format( index_name=INDEX_NAME, table_name=TABLE_NAME, ), - reverse_sql='DROP INDEX IF EXISTS {index_name}'.format( + reverse_sql="DROP INDEX IF EXISTS {index_name}".format( index_name=INDEX_NAME, ), ), migrations.RunSQL( - sql='ALTER TABLE {table_name} ADD CONSTRAINT {constraint_name} UNIQUE USING INDEX {index_name}'.format( + sql="ALTER TABLE {table_name} ADD CONSTRAINT {constraint_name} UNIQUE USING INDEX {index_name}".format( index_name=INDEX_NAME, table_name=TABLE_NAME, constraint_name=CONSTRAINT_NAME, ), - reverse_sql='ALTER TABLE {table_name} DROP CONSTRAINT {constraint_name}'.format( - table_name=TABLE_NAME, - constraint_name=CONSTRAINT_NAME + reverse_sql="ALTER TABLE {table_name} DROP CONSTRAINT {constraint_name}".format( + table_name=TABLE_NAME, constraint_name=CONSTRAINT_NAME ), - ) - ] + ), + ], ) ] diff --git a/contentcuration/contentcuration/migrations/0115_index_contentnode_node_id_field.py b/contentcuration/contentcuration/migrations/0115_index_contentnode_node_id_field.py index 04dc54c739..246b80972a 100644 --- a/contentcuration/contentcuration/migrations/0115_index_contentnode_node_id_field.py +++ b/contentcuration/contentcuration/migrations/0115_index_contentnode_node_id_field.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2020-08-06 20:20 -from __future__ import unicode_literals - from django.db import migrations from django.db import models diff --git a/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py b/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py index 695ee570e5..ce40ad640c 100644 --- a/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py +++ b/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2020-09-15 18:39 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -11,7 +9,7 @@ class Migration(migrations.Migration): atomic = False dependencies = [ - ('contentcuration', '0115_index_contentnode_node_id_field'), + ("contentcuration", "0115_index_contentnode_node_id_field"), ] operations = [ @@ -35,7 +33,6 @@ class Migration(migrations.Migration): ), ], ), - migrations.SeparateDatabaseAndState( state_operations=[ migrations.AddIndex( @@ -58,12 +55,14 @@ class Migration(migrations.Migration): ), ], ), - migrations.SeparateDatabaseAndState( state_operations=[ migrations.AddIndex( model_name="file", - index=models.Index(fields=['checksum', 'file_size'], name="file_checksum_file_size_idx"), + index=models.Index( + fields=["checksum", "file_size"], + name="file_checksum_file_size_idx", + ), ), ], database_operations=[ @@ -79,5 +78,4 @@ class Migration(migrations.Migration): ), ], ), - ] diff --git a/contentcuration/contentcuration/migrations/0117_assessment_id_index.py b/contentcuration/contentcuration/migrations/0117_assessment_id_index.py index aee4e57372..31f2abc73d 100644 --- a/contentcuration/contentcuration/migrations/0117_assessment_id_index.py +++ 
b/contentcuration/contentcuration/migrations/0117_assessment_id_index.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2020-09-14 21:59 -from __future__ import unicode_literals - from django.db import migrations from django.db import models diff --git a/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py b/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py index 8d8c32ac1d..fac6d5f580 100644 --- a/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py +++ b/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-01-14 15:50 -from __future__ import unicode_literals - import django.db.models.deletion import django.utils.timezone from django.conf import settings @@ -12,78 +10,108 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0117_assessment_id_index'), + ("contentcuration", "0117_assessment_id_index"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='complete', + model_name="contentnode", + name="complete", field=models.NullBooleanField(), ), migrations.AddField( - model_name='invitation', - name='accepted', + model_name="invitation", + name="accepted", field=models.BooleanField(default=False), ), migrations.AddField( - model_name='invitation', - name='declined', + model_name="invitation", + name="declined", field=models.BooleanField(default=False), ), migrations.AddField( - model_name='invitation', - name='revoked', + model_name="invitation", + name="revoked", field=models.BooleanField(default=False), ), migrations.AddField( - model_name='user', - name='disk_space_used', - field=models.FloatField(default=0, help_text='How many bytes a user has uploaded'), + model_name="user", + name="disk_space_used", + field=models.FloatField( + default=0, help_text="How many bytes a user has uploaded" + ), ), migrations.AlterField( - model_name='channel', - name='preferences', - field=models.TextField(default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}'), + model_name="channel", + name="preferences", + field=models.TextField( + default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}' + ), ), migrations.AlterField( - model_name='contentnode', - name='created', - field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='created'), + model_name="contentnode", + name="created", + field=models.DateTimeField( + default=django.utils.timezone.now, verbose_name="created" + ), ), migrations.AlterField( - model_name='contentnode', - name='title', + model_name="contentnode", + name="title", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - 
model_name='contenttag', - name='channel', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tags', to='contentcuration.Channel'), + model_name="contenttag", + name="channel", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="tags", + to="contentcuration.Channel", + ), ), migrations.AlterField( - model_name='file', - name='uploaded_by', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to=settings.AUTH_USER_MODEL), + model_name="file", + name="uploaded_by", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="files", + to=settings.AUTH_USER_MODEL, + ), ), migrations.AlterField( - model_name='invitation', - name='channel', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pending_editors', to='contentcuration.Channel'), + model_name="invitation", + name="channel", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="pending_editors", + to="contentcuration.Channel", + ), ), migrations.AlterField( - model_name='invitation', - name='first_name', + model_name="invitation", + name="first_name", field=models.CharField(blank=True, max_length=100), ), migrations.AlterField( - model_name='invitation', - name='sender', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_by', to=settings.AUTH_USER_MODEL), + model_name="invitation", + name="sender", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="sent_by", + to=settings.AUTH_USER_MODEL, + ), ), migrations.AlterField( - model_name='user', - name='preferences', - field=models.TextField(default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}'), + model_name="user", + name="preferences", + field=models.TextField( + default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}' + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0119_task_channel_id.py b/contentcuration/contentcuration/migrations/0119_task_channel_id.py index bed2d82b4e..c20c8efa0a 100644 --- a/contentcuration/contentcuration/migrations/0119_task_channel_id.py +++ b/contentcuration/contentcuration/migrations/0119_task_channel_id.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-01-14 23:12 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0118_relaunch_migrations'), + 
("contentcuration", "0118_relaunch_migrations"), ] operations = [ migrations.AddField( - model_name='task', - name='channel_id', + model_name="task", + name="channel_id", field=models.UUIDField(blank=True, db_index=True, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py b/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py index 6ce83097b8..59d48db118 100644 --- a/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py +++ b/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py @@ -1,21 +1,25 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-01-28 16:46 -from __future__ import unicode_literals +import django.db.models.functions.text +from django.db import migrations +from django.db import models import contentcuration.models -from django.db import migrations, models -import django.db.models.functions.text class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0119_task_channel_id'), + ("contentcuration", "0119_task_channel_id"), ] operations = [ migrations.AddIndex( - model_name='user', - index=contentcuration.models.UniqueActiveUserIndex(django.db.models.functions.text.Lower('email'), condition=models.Q(('is_active', True)), name='contentcura_email_d4d492_idx'), + model_name="user", + index=contentcuration.models.UniqueActiveUserIndex( + django.db.models.functions.text.Lower("email"), + condition=models.Q(("is_active", True)), + name="contentcura_email_d4d492_idx", + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py b/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py index dc68fea6ee..0e427a8281 100644 --- a/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py +++ b/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-03-05 20:28 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -9,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0120_auto_20210128_1646'), + ("contentcuration", "0120_auto_20210128_1646"), ] operations = [ migrations.AddField( - model_name='file', - name='modified', - field=models.DateTimeField(verbose_name='modified', null=True), + model_name="file", + name="modified", + field=models.DateTimeField(verbose_name="modified", null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0122_file_modified_index.py b/contentcuration/contentcuration/migrations/0122_file_modified_index.py index b83b432a4d..e247977453 100644 --- a/contentcuration/contentcuration/migrations/0122_file_modified_index.py +++ b/contentcuration/contentcuration/migrations/0122_file_modified_index.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-03-05 20:28 -from __future__ import unicode_literals - from django.db import migrations from django.db import models @@ -12,15 +10,17 @@ class Migration(migrations.Migration): atomic = False dependencies = [ - ('contentcuration', '0121_auto_20210305_2028'), + ("contentcuration", "0121_auto_20210305_2028"), ] operations = [ migrations.SeparateDatabaseAndState( state_operations=[ migrations.AddIndex( - model_name='file', - index=models.Index(fields=['-modified'], name=FILE_MODIFIED_DESC_INDEX_NAME), + model_name="file", + index=models.Index( + fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME + ), ), ], 
             database_operations=[
diff --git a/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py b/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py
index e25ade1e36..896ddfb8e4 100644
--- a/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py
+++ b/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.29 on 2021-04-07 00:57
-from __future__ import unicode_literals
-
 from django.db import migrations
 from django.db import models

@@ -9,13 +7,15 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0122_file_modified_index'),
+        ("contentcuration", "0122_file_modified_index"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='file',
-            name='modified',
-            field=models.DateTimeField(auto_now=True, null=True, verbose_name='modified'),
+            model_name="file",
+            name="modified",
+            field=models.DateTimeField(
+                auto_now=True, null=True, verbose_name="modified"
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0124_user_feature_flags.py b/contentcuration/contentcuration/migrations/0124_user_feature_flags.py
index deb0a29783..bf9eccaf32 100644
--- a/contentcuration/contentcuration/migrations/0124_user_feature_flags.py
+++ b/contentcuration/contentcuration/migrations/0124_user_feature_flags.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.29 on 2021-04-27 20:39
-from __future__ import unicode_literals
-
 import django.contrib.postgres.fields.jsonb
 from django.db import migrations

@@ -9,13 +7,13 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0123_auto_20210407_0057'),
+        ("contentcuration", "0123_auto_20210407_0057"),
     ]

     operations = [
         migrations.AddField(
-            model_name='user',
-            name='feature_flags',
+            model_name="user",
+            name="feature_flags",
             field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py b/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py
index 59d2266dcf..60bca1b599 100644
--- a/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py
+++ b/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.29 on 2021-04-27 20:39
-from __future__ import unicode_literals
-
 import django.contrib.postgres.fields.jsonb
 from django.db import migrations

@@ -9,13 +7,15 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0124_user_feature_flags'),
+        ("contentcuration", "0124_user_feature_flags"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='user',
-            name='feature_flags',
-            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True),
+            model_name="user",
+            name="feature_flags",
+            field=django.contrib.postgres.fields.jsonb.JSONField(
+                default=dict, null=True
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py b/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py
index 82ee59c7c3..9cdf38902a 100644
--- a/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py
+++ b/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.29 on 2021-02-19 23:14
-from __future__ import unicode_literals
-
 from django.db import migrations
 from django.db import models

@@ -9,13 +7,35 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0125_user_feature_flags_default'),
+        ("contentcuration", "0125_user_feature_flags_default"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("webm", "WEBM Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("graphie", "Graphie Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("h5p", "H5P"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py b/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py
index ffd63ab8b0..409d1132fd 100644
--- a/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py
+++ b/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.29 on 2021-05-04 17:44
-from __future__ import unicode_literals
-
 from django.db import migrations
 from django.db import models

@@ -9,13 +7,44 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0126_auto_20210219_2314'),
+        ("contentcuration", "0126_auto_20210219_2314"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("audio_dependency", "audio (dependency)"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                    ("h5p", "H5P Zip"),
+                    ("h5p_thumbnail", "H5P Thumbnail"),
+                    ("slideshow_image", "Slideshow Image"),
+                    ("slideshow_thumbnail", "Slideshow Thumbnail"),
+                    ("slideshow_manifest", "Slideshow Manifest"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py b/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py
index 97ec962be5..ca3456bf99 100644
--- a/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py
+++ b/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.29 on 2021-05-11 16:05
-from __future__ import unicode_literals
-
 from django.db import migrations
 from django.db import models

@@ -9,13 +7,46 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0127_auto_20210504_1744'),
+        ("contentcuration", "0127_auto_20210504_1744"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("audio_dependency", "audio (dependency)"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                    ("h5p", "H5P Zip"),
+                    ("h5p_thumbnail", "H5P Thumbnail"),
+                    ("qti", "QTI Zip"),
+                    ("qti_thumbnail", "QTI Thumbnail"),
+                    ("slideshow_image", "Slideshow Image"),
+                    ("slideshow_thumbnail", "Slideshow Thumbnail"),
+                    ("slideshow_manifest", "Slideshow Manifest"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py b/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py
index e46b0d2cac..c84c9267d7 100644
--- a/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py
+++ b/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py
@@ -1,159 +1,247 @@
 # Generated by Django 3.2.3 on 2021-05-19 22:13
-
-from django.db import migrations, models
 import django.db.models.deletion
+from django.db import migrations
+from django.db import models


 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0128_auto_20210511_1605'),
+        ("contentcuration", "0128_auto_20210511_1605"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='channel',
-            name='chef_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_chef', to='contentcuration.contentnode'),
-        ),
-        migrations.AlterField(
-            model_name='channel',
-            name='clipboard_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_clipboard', to='contentcuration.contentnode'),
-        ),
-        migrations.AlterField(
-            model_name='channel',
-            name='content_defaults',
+            model_name="channel",
+            name="chef_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_chef",
+                to="contentcuration.contentnode",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="channel",
+            name="clipboard_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_clipboard",
+                to="contentcuration.contentnode",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="channel",
+            name="content_defaults",
             field=models.JSONField(default=dict),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='language',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_language', to='contentcuration.language'),
-        ),
-        migrations.AlterField(
-            model_name='channel',
-            name='main_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_main', to='contentcuration.contentnode'),
-        ),
-        migrations.AlterField(
-            model_name='channel',
-            name='previous_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_previous', to='contentcuration.contentnode'),
-        ),
-        migrations.AlterField(
-            model_name='channel',
-            name='published_data',
+            model_name="channel",
+            name="language",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_language",
+                to="contentcuration.language",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="channel",
+            name="main_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_main",
+                to="contentcuration.contentnode",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="channel",
+            name="previous_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_previous",
+                to="contentcuration.contentnode",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="channel",
+            name="published_data",
             field=models.JSONField(default=dict),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='staging_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_staging', to='contentcuration.contentnode'),
+            model_name="channel",
+            name="staging_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_staging",
+                to="contentcuration.contentnode",
+            ),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='thumbnail_encoding',
+            model_name="channel",
+            name="thumbnail_encoding",
             field=models.JSONField(default=dict),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='trash_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_trash', to='contentcuration.contentnode'),
+            model_name="channel",
+            name="trash_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="channel_trash",
+                to="contentcuration.contentnode",
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='complete',
+            model_name="contentnode",
+            name="complete",
             field=models.BooleanField(null=True),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='extra_fields',
+            model_name="contentnode",
+            name="extra_fields",
             field=models.JSONField(blank=True, default=dict, null=True),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='kind',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contentnodes', to='contentcuration.contentkind'),
+            model_name="contentnode",
+            name="kind",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="contentnodes",
+                to="contentcuration.contentkind",
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='language',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='content_language', to='contentcuration.language'),
+            model_name="contentnode",
+            name="language",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="content_language",
+                to="contentcuration.language",
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='level',
+            model_name="contentnode",
+            name="level",
             field=models.PositiveIntegerField(editable=False),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='lft',
+            model_name="contentnode",
+            name="lft",
             field=models.PositiveIntegerField(editable=False),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='license',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contentcuration.license'),
+            model_name="contentnode",
+            name="license",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                to="contentcuration.license",
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='rght',
+            model_name="contentnode",
+            name="rght",
             field=models.PositiveIntegerField(editable=False),
         ),
         migrations.AlterField(
-            model_name='file',
-            name='file_format',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to='contentcuration.fileformat'),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='language',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to='contentcuration.language'),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='preset',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to='contentcuration.formatpreset'),
-        ),
-        migrations.AlterField(
-            model_name='formatpreset',
-            name='kind',
-            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='format_presets', to='contentcuration.contentkind'),
-        ),
-        migrations.AlterField(
-            model_name='slideshowslide',
-            name='metadata',
+            model_name="file",
+            name="file_format",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="files",
+                to="contentcuration.fileformat",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="file",
+            name="language",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="files",
+                to="contentcuration.language",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="file",
+            name="preset",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="files",
+                to="contentcuration.formatpreset",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="formatpreset",
+            name="kind",
+            field=models.ForeignKey(
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="format_presets",
+                to="contentcuration.contentkind",
+            ),
+        ),
+        migrations.AlterField(
+            model_name="slideshowslide",
+            name="metadata",
             field=models.JSONField(default=dict),
         ),
         migrations.AlterField(
-            model_name='task',
-            name='metadata',
+            model_name="task",
+            name="metadata",
             field=models.JSONField(),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='clipboard_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_clipboard', to='contentcuration.contentnode'),
+            model_name="user",
+            name="clipboard_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="user_clipboard",
+                to="contentcuration.contentnode",
+            ),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='content_defaults',
+            model_name="user",
+            name="content_defaults",
             field=models.JSONField(default=dict),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='feature_flags',
+            model_name="user",
+            name="feature_flags",
             field=models.JSONField(default=dict, null=True),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='information',
+            model_name="user",
+            name="information",
             field=models.JSONField(null=True),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='policies',
+            model_name="user",
+            name="policies",
             field=models.JSONField(default=dict, null=True),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py b/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py
index ac3a7a19d3..2a7b4e076f 100644
--- a/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py
+++ b/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py
@@ -6,23 +6,96 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0129_auto_20210519_2213'),
+        ("contentcuration", "0129_auto_20210519_2213"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='contentkind',
-            name='kind',
-            field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow'), ('h5p', 'H5P'), ('zim', 'Zim')], max_length=200, primary_key=True, serialize=False),
+            model_name="contentkind",
+            name="kind",
+            field=models.CharField(
+                choices=[
+                    ("topic", "Topic"),
+                    ("video", "Video"),
+                    ("audio", "Audio"),
+                    ("exercise", "Exercise"),
+                    ("document", "Document"),
+                    ("html5", "HTML5 App"),
+                    ("slideshow", "Slideshow"),
+                    ("h5p", "H5P"),
+                    ("zim", "Zim"),
+                ],
+                max_length=200,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("webm", "WEBM Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("graphie", "Graphie Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("h5p", "H5P"),
+                    ("zim", "ZIM"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("audio_dependency", "audio (dependency)"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                    ("h5p", "H5P Zip"),
+                    ("h5p_thumbnail", "H5P Thumbnail"),
+                    ("zim", "Zim"),
+                    ("zim_thumbnail", "Zim Thumbnail"),
+                    ("qti", "QTI Zip"),
+                    ("qti_thumbnail", "QTI Thumbnail"),
+                    ("slideshow_image", "Slideshow Image"),
+                    ("slideshow_thumbnail", "Slideshow Thumbnail"),
+                    ("slideshow_manifest", "Slideshow Manifest"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py b/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py
index b27a9f14f0..27346f7b6d 100644
--- a/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py
+++ b/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py
@@ -6,11 +6,11 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0130_auto_20210706_2005'),
+        ("contentcuration", "0130_auto_20210706_2005"),
     ]

     operations = [
         migrations.DeleteModel(
-            name='ChannelResourceSize',
+            name="ChannelResourceSize",
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py b/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py
index 16c715d29e..7d8bbcfd1c 100644
--- a/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py
+++ b/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py
@@ -6,11 +6,11 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0131_auto_20210707_2326'),
+        ("contentcuration", "0131_auto_20210707_2326"),
     ]

     operations = [
         migrations.DeleteModel(
-            name='Exercise',
+            name="Exercise",
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py b/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py
index ef0fec8d19..52c9f0f115 100644
--- a/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py
+++ b/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py
@@ -6,17 +6,27 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0132_auto_20210708_0011'),
+        ("contentcuration", "0132_auto_20210708_0011"),
     ]

     operations = [
         migrations.AddField(
-            model_name='file',
-            name='duration',
+            model_name="file",
+            name="duration",
             field=models.IntegerField(blank=True, null=True),
         ),
         migrations.AddConstraint(
-            model_name='file',
-            constraint=models.CheckConstraint(check=models.Q(models.Q(('duration__gt', 0), ('preset__in', ['audio', 'high_res_video', 'low_res_video'])), ('duration__isnull', True), _connector='OR'), name='file_media_duration_int'),
+            model_name="file",
+            constraint=models.CheckConstraint(
+                check=models.Q(
+                    models.Q(
+                        ("duration__gt", 0),
+                        ("preset__in", ["audio", "high_res_video", "low_res_video"]),
+                    ),
+                    ("duration__isnull", True),
+                    _connector="OR",
+                ),
+                name="file_media_duration_int",
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py b/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py
index f1332bc008..afcf247b53 100644
--- a/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py
+++ b/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py
@@ -6,38 +6,38 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0134_alter_contentkind_kind'),
+ ("contentcuration", "0134_alter_contentkind_kind"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='accessibility_labels', + model_name="contentnode", + name="accessibility_labels", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='categories', + model_name="contentnode", + name="categories", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='grade_levels', + model_name="contentnode", + name="grade_levels", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='learner_needs', + model_name="contentnode", + name="learner_needs", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='learning_activities', + model_name="contentnode", + name="learning_activities", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='resource_types', + model_name="contentnode", + name="resource_types", field=models.JSONField(blank=True, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py b/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py index aff7b5641b..4411bb60ff 100644 --- a/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py +++ b/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py @@ -6,13 +6,17 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0135_add_metadata_labels'), + ("contentcuration", "0135_add_metadata_labels"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='suggested_duration', - field=models.IntegerField(blank=True, help_text='Suggested duration for the content node (in seconds)', null=True), + model_name="contentnode", + name="suggested_duration", + field=models.IntegerField( + blank=True, + help_text="Suggested duration for the content node (in seconds)", + null=True, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0137_channelhistory.py b/contentcuration/contentcuration/migrations/0137_channelhistory.py index e5f692c054..0edaff77bf 100644 --- a/contentcuration/contentcuration/migrations/0137_channelhistory.py +++ b/contentcuration/contentcuration/migrations/0137_channelhistory.py @@ -9,26 +9,61 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0136_contentnode_suggested_duration'), + ("contentcuration", "0136_contentnode_suggested_duration"), ] operations = [ migrations.CreateModel( - name='ChannelHistory', + name="ChannelHistory", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('performed', models.DateTimeField(default=django.utils.timezone.now)), - ('action', models.CharField(choices=[('creation', 'Creation'), ('publication', 'Publication'), ('deletion', 'Deletion'), ('recovery', 'Deletion recovery')], max_length=50)), - ('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='channel_history', to=settings.AUTH_USER_MODEL)), - ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='contentcuration.channel')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("performed", 
models.DateTimeField(default=django.utils.timezone.now)), + ( + "action", + models.CharField( + choices=[ + ("creation", "Creation"), + ("publication", "Publication"), + ("deletion", "Deletion"), + ("recovery", "Deletion recovery"), + ], + max_length=50, + ), + ), + ( + "actor", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_history", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "channel", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="history", + to="contentcuration.channel", + ), + ), ], options={ - 'verbose_name': 'Channel history', - 'verbose_name_plural': 'Channel histories', + "verbose_name": "Channel history", + "verbose_name_plural": "Channel histories", }, ), migrations.AddIndex( - model_name='channelhistory', - index=models.Index(fields=['channel_id'], name='idx_channel_history_channel_id'), + model_name="channelhistory", + index=models.Index( + fields=["channel_id"], name="idx_channel_history_channel_id" + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0138_change.py b/contentcuration/contentcuration/migrations/0138_change.py index f18ee52320..504596a5f1 100644 --- a/contentcuration/contentcuration/migrations/0138_change.py +++ b/contentcuration/contentcuration/migrations/0138_change.py @@ -9,25 +9,62 @@ class Migration(migrations.Migration): dependencies = [ - ('sessions', '0001_initial'), - ('contentcuration', '0137_channelhistory'), + ("sessions", "0001_initial"), + ("contentcuration", "0137_channelhistory"), ] operations = [ migrations.CreateModel( - name='Change', + name="Change", fields=[ - ('server_rev', models.BigAutoField(primary_key=True, serialize=False)), - ('client_rev', models.IntegerField(blank=True, null=True)), - ('table', models.CharField(max_length=32)), - ('change_type', models.IntegerField()), - ('kwargs', models.JSONField(encoder=rest_framework.utils.encoders.JSONEncoder)), - ('applied', models.BooleanField(default=False)), - ('errored', models.BooleanField(default=False)), - ('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.channel')), - ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='changes_by_user', to=settings.AUTH_USER_MODEL)), - ('session', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sessions.session')), - ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='changes_about_user', to=settings.AUTH_USER_MODEL)), + ("server_rev", models.BigAutoField(primary_key=True, serialize=False)), + ("client_rev", models.IntegerField(blank=True, null=True)), + ("table", models.CharField(max_length=32)), + ("change_type", models.IntegerField()), + ( + "kwargs", + models.JSONField(encoder=rest_framework.utils.encoders.JSONEncoder), + ), + ("applied", models.BooleanField(default=False)), + ("errored", models.BooleanField(default=False)), + ( + "channel", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.channel", + ), + ), + ( + "created_by", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="changes_by_user", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "session", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="sessions.session", + ), + ), + ( + "user", 
+                    models.ForeignKey(
+                        blank=True,
+                        null=True,
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="changes_about_user",
+                        to=settings.AUTH_USER_MODEL,
+                    ),
+                ),
             ],
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0139_django_celery_results.py b/contentcuration/contentcuration/migrations/0139_django_celery_results.py
index f6b37e2c28..1a8f65211c 100644
--- a/contentcuration/contentcuration/migrations/0139_django_celery_results.py
+++ b/contentcuration/contentcuration/migrations/0139_django_celery_results.py
@@ -8,31 +8,45 @@
 class Migration(migrations.Migration):

-    replaces = [('django_celery_results', '0138_change'),]
+    replaces = [
+        ("django_celery_results", "0138_change"),
+    ]

     def __init__(self, name, app_label):
-        super(Migration, self).__init__(name, 'django_celery_results')
+        super(Migration, self).__init__(name, "django_celery_results")

     dependencies = [
         migrations.swappable_dependency(settings.AUTH_USER_MODEL),
-        ('contentcuration', '0138_change'),
-        ('django_celery_results', '0001_initial'),
+        ("contentcuration", "0138_change"),
+        ("django_celery_results", "0001_initial"),
     ]

     operations = [
         migrations.AddField(
-            model_name='taskresult',
-            name='channel_id',
+            model_name="taskresult",
+            name="channel_id",
             field=models.UUIDField(blank=True, db_index=True, null=True),
         ),
         migrations.AddField(
-            model_name='taskresult',
-            name='progress',
-            field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
+            model_name="taskresult",
+            name="progress",
+            field=models.IntegerField(
+                blank=True,
+                null=True,
+                validators=[
+                    django.core.validators.MinValueValidator(0),
+                    django.core.validators.MaxValueValidator(100),
+                ],
+            ),
         ),
         migrations.AddField(
-            model_name='taskresult',
-            name='user',
-            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to=settings.AUTH_USER_MODEL),
+            model_name="taskresult",
+            name="user",
+            field=models.ForeignKey(
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="tasks",
+                to=settings.AUTH_USER_MODEL,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0140_delete_task.py b/contentcuration/contentcuration/migrations/0140_delete_task.py
index ec2108fdf2..f654a2fb5b 100644
--- a/contentcuration/contentcuration/migrations/0140_delete_task.py
+++ b/contentcuration/contentcuration/migrations/0140_delete_task.py
@@ -5,11 +5,11 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0139_django_celery_results'),
+        ("contentcuration", "0139_django_celery_results"),
     ]

     operations = [
         migrations.DeleteModel(
-            name='Task',
+            name="Task",
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0141_add_task_signature.py b/contentcuration/contentcuration/migrations/0141_add_task_signature.py
index 4e182e8fa1..3f113f438d 100644
--- a/contentcuration/contentcuration/migrations/0141_add_task_signature.py
+++ b/contentcuration/contentcuration/migrations/0141_add_task_signature.py
@@ -5,24 +5,37 @@
 class Migration(migrations.Migration):

-    replaces = [('django_celery_results', '0140_delete_task'),]
+    replaces = [
+        ("django_celery_results", "0140_delete_task"),
+    ]

     def __init__(self, name, app_label):
-        super(Migration, self).__init__(name, 'django_celery_results')
+        super(Migration, self).__init__(name, "django_celery_results")

     dependencies = [
-        ('contentcuration', '0140_delete_task'),
-        ('django_celery_results', '0011_taskresult_periodic_task_name'),
+        ("contentcuration", "0140_delete_task"),
+        ("django_celery_results", "0011_taskresult_periodic_task_name"),
     ]

     operations = [
         migrations.AddField(
-            model_name='taskresult',
-            name='signature',
+            model_name="taskresult",
+            name="signature",
             field=models.CharField(max_length=32, null=True),
         ),
         migrations.AddIndex(
-            model_name='taskresult',
-            index=models.Index(condition=models.Q(('status__in', frozenset(['STARTED', 'REJECTED', 'RETRY', 'RECEIVED', 'PENDING']))), fields=['signature'], name='task_result_signature_idx'),
+            model_name="taskresult",
+            index=models.Index(
+                condition=models.Q(
+                    (
+                        "status__in",
+                        frozenset(
+                            ["STARTED", "REJECTED", "RETRY", "RECEIVED", "PENDING"]
+                        ),
+                    )
+                ),
+                fields=["signature"],
+                name="task_result_signature_idx",
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py b/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py
index e497fbd398..71e2b4f2bc 100644
--- a/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py
+++ b/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py
@@ -5,12 +5,12 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0141_add_task_signature'),
+        ("contentcuration", "0141_add_task_signature"),
     ]

     operations = [
         migrations.RemoveConstraint(
-            model_name='file',
-            name='file_media_duration_int',
+            model_name="file",
+            name="file_media_duration_int",
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py b/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py
index 3a7dbae1a0..c67a5f068c 100644
--- a/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py
+++ b/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py
@@ -6,12 +6,31 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0142_remove_file_file_media_duration_int'),
+        ("contentcuration", "0142_remove_file_file_media_duration_int"),
     ]

     operations = [
         migrations.AddConstraint(
-            model_name='file',
-            constraint=models.CheckConstraint(check=models.Q(models.Q(('duration__gt', 0), ('preset__in', ['audio', 'audio_dependency', 'high_res_video', 'low_res_video', 'video_dependency'])), ('duration__isnull', True), _connector='OR'), name='file_media_duration_int'),
+            model_name="file",
+            constraint=models.CheckConstraint(
+                check=models.Q(
+                    models.Q(
+                        ("duration__gt", 0),
+                        (
+                            "preset__in",
+                            [
+                                "audio",
+                                "audio_dependency",
+                                "high_res_video",
+                                "low_res_video",
+                                "video_dependency",
+                            ],
+                        ),
+                    ),
+                    ("duration__isnull", True),
+                    _connector="OR",
+                ),
+                name="file_media_duration_int",
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0144_soft_delete_user.py b/contentcuration/contentcuration/migrations/0144_soft_delete_user.py
index a04040df69..d2a778ed34 100644
--- a/contentcuration/contentcuration/migrations/0144_soft_delete_user.py
+++ b/contentcuration/contentcuration/migrations/0144_soft_delete_user.py
@@ -9,23 +9,53 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0143_file_file_media_duration_int'),
+        ("contentcuration", "0143_file_file_media_duration_int"),
     ]

     operations = [
         migrations.AddField(
-            model_name='user',
-            name='deleted',
+            model_name="user",
+            name="deleted",
             field=models.BooleanField(db_index=True, default=False),
         ),
         migrations.CreateModel(
-            name='UserHistory',
+            name="UserHistory",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('action', models.CharField(choices=[('soft-deletion', 'User soft deletion'), ('soft-recovery',
-                 'User soft deletion recovery'), ('related-data-hard-deletion', 'User related data hard deletion')], max_length=32)),
-                ('performed_at', models.DateTimeField(default=django.utils.timezone.now)),
-                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to=settings.AUTH_USER_MODEL)),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                (
+                    "action",
+                    models.CharField(
+                        choices=[
+                            ("soft-deletion", "User soft deletion"),
+                            ("soft-recovery", "User soft deletion recovery"),
+                            (
+                                "related-data-hard-deletion",
+                                "User related data hard deletion",
+                            ),
+                        ],
+                        max_length=32,
+                    ),
+                ),
+                (
+                    "performed_at",
+                    models.DateTimeField(default=django.utils.timezone.now),
+                ),
+                (
+                    "user",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="history",
+                        to=settings.AUTH_USER_MODEL,
+                    ),
+                ),
             ],
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py b/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py
index 64287039f0..6e4743a878 100644
--- a/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py
+++ b/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py
@@ -6,9 +6,10 @@
 from django.db import migrations
 from django.db import models

+
 def transfer_data(apps, schema_editor):
-    CustomTaskMetadata = apps.get_model('contentcuration', 'CustomTaskMetadata')
-    TaskResult = apps.get_model('django_celery_results', 'taskresult')
+    CustomTaskMetadata = apps.get_model("contentcuration", "CustomTaskMetadata")
+    TaskResult = apps.get_model("django_celery_results", "taskresult")

     old_task_results = TaskResult.objects.filter(status__in=states.UNREADY_STATES)

@@ -21,28 +22,62 @@ def transfer_data(apps, schema_editor):
             signature=old_task_result.signature,
         )

+
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0144_soft_delete_user'),
+        ("contentcuration", "0144_soft_delete_user"),
     ]

     operations = [
         migrations.CreateModel(
-            name='CustomTaskMetadata',
+            name="CustomTaskMetadata",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('task_id', models.CharField(max_length=255, unique=True)),
-                ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)),
-                ('progress', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
-                ('signature', models.CharField(max_length=32, null=True)),
-                ('date_created', models.DateTimeField(auto_now_add=True, help_text='Datetime field when the custom_metadata for task was created in UTC', verbose_name='Created DateTime')),
-                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to=settings.AUTH_USER_MODEL)),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("task_id", models.CharField(max_length=255, unique=True)),
+                ("channel_id", models.UUIDField(blank=True, db_index=True, null=True)),
+                (
+                    "progress",
+                    models.IntegerField(
+                        blank=True,
+                        null=True,
+                        validators=[
+                            django.core.validators.MinValueValidator(0),
+                            django.core.validators.MaxValueValidator(100),
+                        ],
+                    ),
+                ),
+                ("signature", models.CharField(max_length=32, null=True)),
+                (
+                    "date_created",
+                    models.DateTimeField(
+                        auto_now_add=True,
+                        help_text="Datetime field when the custom_metadata for task was created in UTC",
+                        verbose_name="Created DateTime",
+                    ),
+                ),
+                (
+                    "user",
+                    models.ForeignKey(
+                        null=True,
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="tasks",
+                        to=settings.AUTH_USER_MODEL,
+                    ),
+                ),
             ],
         ),
         migrations.AddIndex(
-            model_name='customtaskmetadata',
-            index=models.Index(fields=['signature'], name='task_result_signature'),
+            model_name="customtaskmetadata",
+            index=models.Index(fields=["signature"], name="task_result_signature"),
         ),
         migrations.RunPython(transfer_data),
     ]
diff --git a/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py b/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py
index 5ecc6cb98f..0d288db47f 100644
--- a/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py
+++ b/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py
@@ -1,37 +1,40 @@
 # Generated by Django 3.2.19 on 2023-09-14 10:42
 from django.db import migrations

+
 class Migration(migrations.Migration):

-    replaces = [('django_celery_results', '0145_custom_task_metadata'),]
+    replaces = [
+        ("django_celery_results", "0145_custom_task_metadata"),
+    ]

     def __init__(self, name, app_label):
-        super(Migration, self).__init__(name, 'django_celery_results')
+        super(Migration, self).__init__(name, "django_celery_results")

     dependencies = [
-        ('contentcuration', '0145_custom_task_metadata'),
-        ('contentcuration', '0141_add_task_signature'),
+        ("contentcuration", "0145_custom_task_metadata"),
+        ("contentcuration", "0141_add_task_signature"),
     ]

     operations = [
         migrations.RemoveField(
-            model_name='taskresult',
-            name='channel_id',
+            model_name="taskresult",
+            name="channel_id",
         ),
         migrations.RemoveField(
-            model_name='taskresult',
-            name='progress',
+            model_name="taskresult",
+            name="progress",
         ),
         migrations.RemoveField(
-            model_name='taskresult',
-            name='user',
+            model_name="taskresult",
+            name="user",
         ),
         migrations.RemoveField(
-            model_name='taskresult',
-            name='signature',
+            model_name="taskresult",
+            name="signature",
         ),
         migrations.RemoveIndex(
-            model_name='taskresult',
-            name='task_result_signature_idx',
+            model_name="taskresult",
+            name="task_result_signature_idx",
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py b/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py
index ac3faa8904..8db529797f 100644
--- a/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py
+++ b/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py
@@ -6,13 +6,49 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0146_drop_taskresult_fields'),
+        ("contentcuration", "0146_drop_taskresult_fields"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("audio_dependency", "audio (dependency)"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                    ("h5p", "H5P Zip"),
+                    ("h5p_thumbnail", "H5P Thumbnail"),
+                    ("zim", "Zim"),
+                    ("zim_thumbnail", "Zim Thumbnail"),
+                    ("qti", "QTI Zip"),
+                    ("qti_thumbnail", "QTI Thumbnail"),
+                    ("slideshow_image", "Slideshow Image"),
+                    ("slideshow_thumbnail", "Slideshow Thumbnail"),
+                    ("slideshow_manifest", "Slideshow Manifest"),
+                    ("imscp_zip", "IMSCP Zip"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py b/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py
index ea3b80c86d..8a4ccaaa32 100644
--- a/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py
+++ b/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py
@@ -10,58 +10,120 @@
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0147_alter_formatpreset_id'),
+        ("contentcuration", "0147_alter_formatpreset_id"),
     ]

     operations = [
         migrations.CreateModel(
-            name='RecommendationsInteractionEvent',
+            name="RecommendationsInteractionEvent",
             fields=[
-                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
-                ('context', models.JSONField()),
-                ('created_at', models.DateTimeField(auto_now_add=True)),
-                ('contentnode_id', models.UUIDField()),
-                ('content_id', models.UUIDField()),
-                ('feedback_type', models.CharField(choices=[('IMPORTED', 'Imported'), ('REJECTED', 'Rejected'), ('PREVIEWED', 'Previewed'), ('SHOWMORE', 'Show More'), ('IGNORED', 'Ignored'), ('FLAGGED', 'Flagged')], max_length=50)),
-                ('feedback_reason', models.TextField(max_length=1500)),
-                ('recommendation_event_id', models.UUIDField()),
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+
("context", models.JSONField()), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("contentnode_id", models.UUIDField()), + ("content_id", models.UUIDField()), + ( + "feedback_type", + models.CharField( + choices=[ + ("IMPORTED", "Imported"), + ("REJECTED", "Rejected"), + ("PREVIEWED", "Previewed"), + ("SHOWMORE", "Show More"), + ("IGNORED", "Ignored"), + ("FLAGGED", "Flagged"), + ], + max_length=50, + ), + ), + ("feedback_reason", models.TextField(max_length=1500)), + ("recommendation_event_id", models.UUIDField()), ], options={ - 'abstract': False, + "abstract": False, }, ), migrations.CreateModel( - name='RecommendationsEvent', + name="RecommendationsEvent", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('context', models.JSONField()), - ('created_at', models.DateTimeField(auto_now_add=True)), - ('contentnode_id', models.UUIDField()), - ('content_id', models.UUIDField()), - ('target_channel_id', models.UUIDField()), - ('time_hidden', models.DateTimeField()), - ('content', models.JSONField(default=list)), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("context", models.JSONField()), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("contentnode_id", models.UUIDField()), + ("content_id", models.UUIDField()), + ("target_channel_id", models.UUIDField()), + ("time_hidden", models.DateTimeField()), + ("content", models.JSONField(default=list)), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), ], options={ - 'abstract': False, + "abstract": False, }, ), migrations.CreateModel( - name='FlagFeedbackEvent', + name="FlagFeedbackEvent", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('context', models.JSONField()), - ('created_at', models.DateTimeField(auto_now_add=True)), - ('contentnode_id', models.UUIDField()), - ('content_id', models.UUIDField()), - ('target_channel_id', models.UUIDField()), - ('feedback_type', models.CharField(choices=[('IMPORTED', 'Imported'), ('REJECTED', 'Rejected'), ('PREVIEWED', 'Previewed'), ('SHOWMORE', 'Show More'), ('IGNORED', 'Ignored'), ('FLAGGED', 'Flagged')], max_length=50)), - ('feedback_reason', models.TextField(max_length=1500)), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("context", models.JSONField()), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("contentnode_id", models.UUIDField()), + ("content_id", models.UUIDField()), + ("target_channel_id", models.UUIDField()), + ( + "feedback_type", + models.CharField( + choices=[ + ("IMPORTED", "Imported"), + ("REJECTED", "Rejected"), + ("PREVIEWED", "Previewed"), + ("SHOWMORE", "Show More"), + ("IGNORED", "Ignored"), + ("FLAGGED", "Flagged"), + ], + max_length=50, + ), + ), + ("feedback_reason", models.TextField(max_length=1500)), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), ], options={ - 'abstract': False, + "abstract": False, }, ), ] diff --git a/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py 
b/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py index a1ebff4d29..d1a7d9086b 100644 --- a/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py +++ b/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py @@ -6,20 +6,23 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent'), + ( + "contentcuration", + "0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent", + ), ] operations = [ migrations.AddField( - model_name='change', - name='unpublishable', + model_name="change", + name="unpublishable", field=models.BooleanField(blank=True, null=True), ), # Add default to False in a separate migration operation # to avoid expensive backfilling of the new column for existing rows migrations.AlterField( - model_name='change', - name='unpublishable', + model_name="change", + name="unpublishable", field=models.BooleanField(blank=True, default=False, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py b/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py index e1ffc389ef..c17c71988d 100644 --- a/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py +++ b/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py @@ -6,18 +6,80 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0149_unpublishable_change_field'), + ("contentcuration", "0149_unpublishable_change_field"), ] operations = [ migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'Bloom Document'), ('bloomd', 'Bloom Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "Bloom Document"), + ("bloomd", "Bloom Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 
'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip'), ('bloompub', 'Bloom Document')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ("imscp_zip", "IMSCP Zip"), + ("bloompub", "Bloom Document"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0152_alter_assessmentitem_type.py b/contentcuration/contentcuration/migrations/0152_alter_assessmentitem_type.py new file mode 100644 index 0000000000..f047648bb3 --- /dev/null +++ b/contentcuration/contentcuration/migrations/0152_alter_assessmentitem_type.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.24 on 2025-04-17 16:09 +from django.db import migrations +from django.db import models + + +class Migration(migrations.Migration): + + dependencies = [ + ("contentcuration", "0151_auto_20250417_1516"), + ] + + operations = [ + migrations.AlterField( + model_name="assessmentitem", + name="type", + field=models.CharField( + choices=[ + ("input_question", "Input Question"), + ("multiple_selection", "Multiple Selection"), + ("single_selection", "Single Selection"), + ("free_response", "Free Response"), + ("perseus_question", "Perseus Question"), + ], + default="multiple_selection", + max_length=50, + ), + ), + ] diff --git a/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py b/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py new file mode 100644 index 0000000000..6ca8841220 --- /dev/null +++ b/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.24 on 2025-05-16 07:02 +from django.db import migrations +from django.db import models + + +class Migration(migrations.Migration): + + dependencies = [ + ("contentcuration", "0152_alter_assessmentitem_type"), + ] + + operations = [ + migrations.AlterField( + model_name="recommendationsevent", + 
name="time_hidden", + field=models.DateTimeField(blank=True, null=True), + ), + ] diff --git a/contentcuration/contentcuration/migrations/0154_alter_assessmentitem_type.py b/contentcuration/contentcuration/migrations/0154_alter_assessmentitem_type.py new file mode 100644 index 0000000000..7e62b5823c --- /dev/null +++ b/contentcuration/contentcuration/migrations/0154_alter_assessmentitem_type.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.24 on 2025-09-03 18:39 +from django.db import migrations +from django.db import models + + +class Migration(migrations.Migration): + + dependencies = [ + ("contentcuration", "0153_alter_recommendationsevent_time_hidden"), + ] + + operations = [ + migrations.AlterField( + model_name="assessmentitem", + name="type", + field=models.CharField( + choices=[ + ("input_question", "Input Question"), + ("multiple_selection", "Multiple Selection"), + ("single_selection", "Single Selection"), + ("free_response", "Free Response"), + ("perseus_question", "Perseus Question"), + ("true_false", "True/False"), + ], + default="multiple_selection", + max_length=50, + ), + ), + ] diff --git a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py index 7d1d1ca7ab..c2d94744e0 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -82,27 +82,26 @@ from contentcuration.viewsets.sync.constants import PUBLISHABLE_CHANGE_TABLES from contentcuration.viewsets.sync.constants import PUBLISHED - EDIT_ACCESS = "edit" VIEW_ACCESS = "view" DEFAULT_CONTENT_DEFAULTS = { - 'license': None, - 'language': None, - 'author': None, - 'aggregator': None, - 'provider': None, - 'copyright_holder': None, - 'license_description': None, - 'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5, - 'm_value': 5, - 'n_value': 5, - 'auto_derive_video_thumbnail': True, - 'auto_derive_audio_thumbnail': True, - 'auto_derive_document_thumbnail': True, - 'auto_derive_html5_thumbnail': True, - 'auto_derive_exercise_thumbnail': True, - 'auto_randomize_questions': True, + "license": None, + "language": None, + "author": None, + "aggregator": None, + "provider": None, + "copyright_holder": None, + "license_description": None, + "mastery_model": exercises.NUM_CORRECT_IN_A_ROW_5, + "m_value": 5, + "n_value": 5, + "auto_derive_video_thumbnail": True, + "auto_derive_audio_thumbnail": True, + "auto_derive_document_thumbnail": True, + "auto_derive_html5_thumbnail": True, + "auto_derive_exercise_thumbnail": True, + "auto_randomize_questions": True, } DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False) @@ -114,10 +113,9 @@ def to_pk(model_or_pk): class UserManager(BaseUserManager): - def create_user(self, email, first_name, last_name, password=None): if not email: - raise ValueError('Email address not specified') + raise ValueError("Email address not specified") new_user = self.model( email=self.normalize_email(email), @@ -137,7 +135,7 @@ def create_superuser(self, email, first_name, last_name, password=None): class UniqueActiveUserIndex(Index): - def create_sql(self, model, schema_editor, using='', **kwargs): + def create_sql(self, model, schema_editor, using="", **kwargs): """ This is a vendored and modified version of the Django create_sql method We do this so that we can monkey patch in the unique index statement onto the schema_editor @@ -146,7 +144,9 @@ def create_sql(self, model, schema_editor, using='', **kwargs): We should remove this as soon as Django natively supports UniqueConstraints with Expressions. 
This should hopefully be the case in Django 3.3. """ - include = [model._meta.get_field(field_name).column for field_name in self.include] + include = [ + model._meta.get_field(field_name).column for field_name in self.include + ] condition = self._get_condition_sql(model, schema_editor) if self.expressions: index_expressions = [] @@ -173,10 +173,17 @@ def create_sql(self, model, schema_editor, using='', **kwargs): schema_editor.sql_create_index = sql # Generate the SQL statement that we want to return return_statement = schema_editor._create_index_sql( - model, fields=fields, name=self.name, using=using, - db_tablespace=self.db_tablespace, col_suffixes=col_suffixes, - opclasses=self.opclasses, condition=condition, include=include, - expressions=expressions, **kwargs, + model, + fields=fields, + name=self.name, + using=using, + db_tablespace=self.db_tablespace, + col_suffixes=col_suffixes, + opclasses=self.opclasses, + condition=condition, + include=include, + expressions=expressions, + **kwargs, ) # Reinstate the previous index SQL statement so that we have done no harm schema_editor.sql_create_index = old_create_index_sql @@ -189,15 +196,31 @@ class User(AbstractBaseUser, PermissionsMixin): first_name = models.CharField(max_length=100) last_name = models.CharField(max_length=100) is_admin = models.BooleanField(default=False) - is_active = models.BooleanField('active', default=False, - help_text='Designates whether this user should be treated as active.') - is_staff = models.BooleanField('staff status', default=False, - help_text='Designates whether the user can log into this admin site.') - date_joined = models.DateTimeField('date joined', default=timezone.now) - clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL) + is_active = models.BooleanField( + "active", + default=False, + help_text="Designates whether this user should be treated as active.", + ) + is_staff = models.BooleanField( + "staff status", + default=False, + help_text="Designates whether the user can log into this admin site.", + ) + date_joined = models.DateTimeField("date joined", default=timezone.now) + clipboard_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="user_clipboard", + on_delete=models.SET_NULL, + ) preferences = models.TextField(default=DEFAULT_USER_PREFERENCES) - disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload') - disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded') + disk_space = models.FloatField( + default=524288000, help_text="How many bytes a user can upload" + ) + disk_space_used = models.FloatField( + default=0, help_text="How many bytes a user has uploaded" + ) information = JSONField(null=True) content_defaults = JSONField(default=dict) @@ -206,14 +229,16 @@ class User(AbstractBaseUser, PermissionsMixin): deleted = models.BooleanField(default=False, db_index=True) - _field_updates = FieldTracker(fields=[ - # Field to watch for changes - "disk_space", - ]) + _field_updates = FieldTracker( + fields=[ + # Field to watch for changes + "disk_space", + ] + ) objects = UserManager() - USERNAME_FIELD = 'email' - REQUIRED_FIELDS = ['first_name', 'last_name'] + USERNAME_FIELD = "email" + REQUIRED_FIELDS = ["first_name", "last_name"] def __unicode__(self): return self.email @@ -255,38 +280,53 @@ def hard_delete_user_related_data(self): self.sent_by.all().delete() editable_channels_user_query = ( -
User.objects.filter(editable_channels__id=OuterRef('id')) - .values_list('id', flat=True) - .distinct() + User.objects.filter(editable_channels__id=OuterRef("id")) + .values_list("id", flat=True) + .distinct() ) - non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount( - editable_channels_user_query, field="id")).filter(num_editors=1, public=False) + non_public_channels_sole_editor = self.editable_channels.annotate( + num_editors=SQCount(editable_channels_user_query, field="id") + ).filter(num_editors=1, public=False) # Point sole editor non-public channels' contentnodes to orphan tree to let # our garbage collection delete the nodes and underlying file. - tree_ids_to_update = non_public_channels_sole_editor.values_list('main_tree__tree_id', flat=True) + tree_ids_to_update = non_public_channels_sole_editor.values_list( + "main_tree__tree_id", flat=True + ) for tree_id in tree_ids_to_update: - ContentNode.objects.filter(tree_id=tree_id).update(parent_id=settings.ORPHANAGE_ROOT_ID) + ContentNode.objects.filter(tree_id=tree_id).update( + parent_id=settings.ORPHANAGE_ROOT_ID + ) - logging.debug("Queries after updating content nodes parent ID: %s", connection.queries) + logging.debug( + "Queries after updating content nodes parent ID: %s", connection.queries + ) # Hard delete non-public channels associated with this user (if user is the only editor). non_public_channels_sole_editor.delete() # Hard delete non-public channel collections associated with this user (if user is the only editor). user_query = ( - User.objects.filter(channel_sets__id=OuterRef('id')) - .values_list('id', flat=True) - .distinct() + User.objects.filter(channel_sets__id=OuterRef("id")) + .values_list("id", flat=True) + .distinct() ) - self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1, public=False).delete() + self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter( + num_editors=1, public=False + ).delete() # Create history! - self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION) + self.history.create( + user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION + ) def can_edit(self, channel_id): - return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists() + return ( + Channel.filter_edit_queryset(Channel.objects.all(), self) + .filter(pk=channel_id) + .exists() + ) def check_space(self, size, checksum): if self.is_admin: @@ -298,55 +338,90 @@ def check_space(self, size, checksum): space = self.get_available_space(active_files=active_files) if space < size: - raise PermissionDenied(_("Not enough space. Check your storage under Settings page.")) + raise PermissionDenied( + _("Not enough space. 
Check your storage under Settings page.") + ) def check_feature_flag(self, flag_name): feature_flags = self.feature_flags or {} return feature_flags.get(flag_name, False) def check_channel_space(self, channel): - active_files = self.get_user_active_files() - staging_tree_id = channel.staging_tree.tree_id - channel_files = self.files\ - .filter(contentnode__tree_id=staging_tree_id)\ - .values('checksum')\ - .distinct()\ - .exclude(checksum__in=active_files.values_list('checksum', flat=True)) - staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0) + tree_cte = With(self.get_user_active_trees().distinct(), name="trees") + files_cte = With( + tree_cte.join( + self.files.get_queryset(), contentnode__tree_id=tree_cte.col.tree_id + ) + .values("checksum") + .distinct(), + name="files", + ) - if self.get_available_space(active_files=active_files) < (staged_size): - raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.')) + staging_tree_files = ( + self.files.filter(contentnode__tree_id=channel.staging_tree.tree_id) + .with_cte(tree_cte) + .with_cte(files_cte) + .exclude(Exists(files_cte.queryset().filter(checksum=OuterRef("checksum")))) + .values("checksum") + .distinct() + ) + staged_size = float( + staging_tree_files.aggregate(used=Sum("file_size"))["used"] or 0 + ) + + if self.get_available_space() < staged_size: + raise PermissionDenied( + _("Out of storage! Request more space under Settings > Storage.") + ) def check_staged_space(self, size, checksum): + """ + .. deprecated:: only used in `api_file_upload` which is now deprecated + """ if self.staged_files.filter(checksum=checksum).exists(): return True space = self.get_available_staged_space() if space < size: - raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.')) + raise PermissionDenied( + _("Out of storage! Request more space under Settings > Storage.") + ) def get_available_staged_space(self): - space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0 + """ + .. 
deprecated:: only used in `api_file_upload` which is now deprecated + """ + space_used = ( + self.staged_files.values("checksum") + .distinct() + .aggregate(size=Sum("file_size"))["size"] + or 0 + ) return float(max(self.disk_space - space_used, 0)) def get_available_space(self, active_files=None): - return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0)) + return float( + max(self.disk_space - self.get_space_used(active_files=active_files), 0) + ) def get_user_active_trees(self): - return self.editable_channels.exclude(deleted=True)\ - .values(tree_id=F("main_tree__tree_id")) + return self.editable_channels.exclude(deleted=True).values( + tree_id=F("main_tree__tree_id") + ) def get_user_active_files(self): cte = With(self.get_user_active_trees().distinct()) - return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\ - .with_cte(cte)\ - .values('checksum')\ + return ( + cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id) + .with_cte(cte) + .values("checksum") .distinct() + ) def get_space_used(self, active_files=None): active_files = active_files or self.get_user_active_files() - files = active_files.aggregate(total_used=Sum('file_size')) - return float(files['total_used'] or 0) + files = active_files.aggregate(total_used=Sum("file_size")) + return float(files["total_used"] or 0) def set_space_used(self): self.disk_space_used = self.get_space_used() @@ -355,13 +430,15 @@ def set_space_used(self): def get_space_used_by_kind(self): active_files = self.get_user_active_files() - files = active_files.values('preset__kind_id')\ - .annotate(space=Sum('file_size'))\ - .order_by() + files = ( + active_files.values("preset__kind_id") + .annotate(space=Sum("file_size")) + .order_by() + ) kind_dict = {} for item in files: - kind_dict[item['preset__kind_id']] = item['space'] + kind_dict[item["preset__kind_id"]] = item["space"] return kind_dict def email_user(self, subject, message, from_email=None, **kwargs): @@ -381,7 +458,7 @@ def get_full_name(self): """ Returns the first_name plus the last_name, with a space in between. 
""" - full_name = '%s %s' % (self.first_name, self.last_name) + full_name = "%s %s" % (self.first_name, self.last_name) return full_name.strip() def get_short_name(self): @@ -396,9 +473,10 @@ def get_token(self): def save(self, *args, **kwargs): from contentcuration.utils.user import calculate_user_storage + super(User, self).save(*args, **kwargs) - if 'disk_space' in self._field_updates.changed(): + if "disk_space" in self._field_updates.changed(): calculate_user_storage(self.pk) changed = False @@ -408,18 +486,37 @@ def save(self, *args, **kwargs): changed = True if not self.clipboard_tree: - self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC) + self.clipboard_tree = ContentNode.objects.create( + title=self.email + " clipboard", kind_id=content_kinds.TOPIC + ) self.clipboard_tree.save() changed = True if changed: self.save() + def get_server_rev(self): + changes_cte = With( + Change.objects.filter(user=self).values("server_rev", "applied"), + ) + return ( + changes_cte.queryset() + .with_cte(changes_cte) + .filter(applied=True) + .values_list("server_rev", flat=True) + .order_by("-server_rev") + .first() + ) or 0 + class Meta: verbose_name = "User" verbose_name_plural = "Users" indexes = [ - UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx") + UniqueActiveUserIndex( + Lower("email"), + condition=Q(is_active=True), + name="contentcura_email_d4d492_idx", + ) ] @classmethod @@ -433,13 +530,17 @@ def filter_view_queryset(cls, queryset, user): # all shared editors all_editable = User.editable_channels.through.objects.all() editable = all_editable.filter( - channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True) + channel_id__in=all_editable.filter(user_id=user.pk).values_list( + "channel_id", flat=True + ) ) # all shared viewers all_view_only = User.view_only_channels.through.objects.all() view_only = all_view_only.filter( - channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True) + channel_id__in=all_view_only.filter(user_id=user.pk).values_list( + "channel_id", flat=True + ) ) return queryset.filter( @@ -479,9 +580,8 @@ def get_for_email(cls, email, deleted=False, **filters): class UUIDField(models.CharField): - def __init__(self, *args, **kwargs): - kwargs['max_length'] = 32 + kwargs["max_length"] = 32 super(UUIDField, self).__init__(*args, **kwargs) def prepare_value(self, value): @@ -545,14 +645,14 @@ def object_storage_name(instance, filename): :return: str """ - default_ext = '' + default_ext = "" if instance.file_format_id: - default_ext = '.{}'.format(instance.file_format_id) + default_ext = ".{}".format(instance.file_format_id) return generate_object_storage_name(instance.checksum, filename, default_ext) -def generate_object_storage_name(checksum, filename, default_ext=''): +def generate_object_storage_name(checksum, filename, default_ext=""): """ Separated from file_on_disk_name to allow for simple way to check if has already exists """ h = checksum basename, actual_ext = os.path.splitext(filename) @@ -628,6 +728,7 @@ def _save(self, name, content): class SecretToken(models.Model): """Tokens for channels""" + token = models.CharField(max_length=100, unique=True) is_primary = models.BooleanField(default=False) @@ -678,10 +779,10 @@ def get_channel_thumbnail(channel): if thumbnail_data.get("base64"): return thumbnail_data["base64"] - if channel.get("thumbnail") and 'static' not in channel.get("thumbnail"): + 
if channel.get("thumbnail") and "static" not in channel.get("thumbnail"): return generate_storage_url(channel.get("thumbnail")) - return '/static/img/kolibri_placeholder.png' + return "/static/img/kolibri_placeholder.png" CHANNEL_NAME_INDEX_NAME = "channel_name_idx" @@ -705,24 +806,31 @@ def boolean_val(val): class PermissionCTE(With): tree_id_fields = [ - "channel__{}__tree_id".format(tree_name) - for tree_name in CHANNEL_TREES + "channel__{}__tree_id".format(tree_name) for tree_name in CHANNEL_TREES ] def __init__(self, model, user_id, **kwargs): - queryset = model.objects.filter(user_id=user_id)\ - .annotate( - tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField()) + queryset = model.objects.filter(user_id=user_id).annotate( + tree_id=Unnest( + ArrayRemove(Array(*self.tree_id_fields), None), + output_field=models.IntegerField(), + ) + ) + super(PermissionCTE, self).__init__( + queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs ) - super(PermissionCTE, self).__init__(queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs) @classmethod def editable_channels(cls, user_id): - return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte") + return PermissionCTE( + User.editable_channels.through, user_id, name="editable_channels_cte" + ) @classmethod def view_only_channels(cls, user_id): - return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte") + return PermissionCTE( + User.view_only_channels.through, user_id, name="view_only_channels_cte" + ) def exists(self, *filters): return Exists(self.queryset().filter(*filters).values("user_id")) @@ -756,6 +864,7 @@ def update_or_create(self, defaults=None, **kwargs): class Channel(models.Model): """ Permissions come from association with organizations """ + id = UUIDField(primary_key=True, default=uuid.uuid4) name = models.CharField(max_length=200, blank=True) description = models.CharField(max_length=400, blank=True) @@ -765,39 +874,83 @@ class Channel(models.Model): thumbnail_encoding = JSONField(default=dict) editors = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='editable_channels', + related_name="editable_channels", verbose_name="editors", help_text="Users with edit rights", blank=True, ) viewers = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='view_only_channels', + related_name="view_only_channels", verbose_name="viewers", help_text="Users with view only rights", blank=True, ) - language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL) - trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL) - clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL) - main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL) - staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL) - chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL) - previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL) + language = models.ForeignKey( + "Language", + null=True, + blank=True, + 
related_name="channel_language", + on_delete=models.SET_NULL, + ) + trash_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_trash", + on_delete=models.SET_NULL, + ) + clipboard_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_clipboard", + on_delete=models.SET_NULL, + ) + main_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_main", + on_delete=models.SET_NULL, + ) + staging_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_staging", + on_delete=models.SET_NULL, + ) + chef_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_chef", + on_delete=models.SET_NULL, + ) + previous_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_previous", + on_delete=models.SET_NULL, + ) bookmarked_by = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='bookmarked_channels', + related_name="bookmarked_channels", verbose_name="bookmarked by", ) deleted = models.BooleanField(default=False, db_index=True) public = models.BooleanField(default=False, db_index=True) preferences = models.TextField(default=DEFAULT_USER_PREFERENCES) content_defaults = JSONField(default=dict) - priority = models.IntegerField(default=0, help_text="Order to display public channels") + priority = models.IntegerField( + default=0, help_text="Order to display public channels" + ) last_published = models.DateTimeField(blank=True, null=True) secret_tokens = models.ManyToManyField( SecretToken, - related_name='channels', + related_name="channels", verbose_name="secret tokens", blank=True, ) @@ -817,26 +970,28 @@ class Channel(models.Model): published_size = models.FloatField(default=0) included_languages = models.ManyToManyField( "Language", - related_name='channels', + related_name="channels", verbose_name="languages", blank=True, ) - _field_updates = FieldTracker(fields=[ - # Field to watch for changes - "description", - "language_id", - "thumbnail", - "name", - "thumbnail_encoding", - # watch these fields for changes - # but exclude them from setting changed - # on the main tree - "deleted", - "public", - "main_tree_id", - "version", - ]) + _field_updates = FieldTracker( + fields=[ + # Field to watch for changes + "description", + "language_id", + "thumbnail", + "name", + "thumbnail_encoding", + # watch these fields for changes + # but exclude them from setting changed + # on the main tree + "deleted", + "public", + "main_tree_id", + "version", + ] + ) objects = ChannelModelQuerySet.as_manager() @@ -852,7 +1007,11 @@ def filter_edit_queryset(cls, queryset, user): if not user_id: return queryset.none() - edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id"))) + edit = Exists( + User.editable_channels.through.objects.filter( + user_id=user_id, channel_id=OuterRef("id") + ) + ) queryset = queryset.annotate(edit=edit) if user.is_admin: return queryset @@ -866,8 +1025,16 @@ def filter_view_queryset(cls, queryset, user): if user_id: filters = dict(user_id=user_id, channel_id=OuterRef("id")) - edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id")) - view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id")) + edit = Exists( + User.editable_channels.through.objects.filter(**filters).values( + "user_id" + ) + ) + view = Exists( + 
User.view_only_channels.through.objects.filter(**filters).values( + "user_id" + ) + ) else: edit = boolean_val(False) view = boolean_val(False) @@ -882,9 +1049,9 @@ def filter_view_queryset(cls, queryset, user): permission_filter = Q() if user_id: - pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list( - "channel_id", flat=True - ) + pending_channels = Invitation.objects.filter( + email=user_email, revoked=False, declined=False, accepted=False + ).values_list("channel_id", flat=True) permission_filter = ( Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels) ) @@ -893,7 +1060,11 @@ def filter_view_queryset(cls, queryset, user): @classmethod def get_all_channels(cls): - return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct() + return ( + cls.objects.select_related("main_tree") + .prefetch_related("editors", "viewers") + .distinct() + ) def resource_size_key(self): return "{}_resource_size".format(self.pk) @@ -905,13 +1076,15 @@ def get_resource_size(self): if cached_data: return cached_data tree_id = self.main_tree.tree_id - files = File.objects.select_related('contentnode', 'assessment_item')\ - .filter(contentnode__tree_id=tree_id)\ - .values('checksum', 'file_size')\ - .distinct()\ - .aggregate(resource_size=Sum('file_size')) - cache.set(self.resource_size_key(), files['resource_size'] or 0, None) - return files['resource_size'] or 0 + files = ( + File.objects.select_related("contentnode", "assessment_item") + .filter(contentnode__tree_id=tree_id) + .values("checksum", "file_size") + .distinct() + .aggregate(resource_size=Sum("file_size")) + ) + cache.set(self.resource_size_key(), files["resource_size"] or 0, None) + return files["resource_size"] or 0 def on_create(self): actor_id = getattr(self, "_actor_id", None) @@ -934,7 +1107,12 @@ def on_create(self): ) # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids. 
if settings.DEBUG: - if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1: + if ( + ContentNode.objects.filter( + parent=None, tree_id=self.main_tree.tree_id + ).count() + != 1 + ): raise AssertionError if not self.trash_tree: @@ -951,20 +1129,31 @@ def on_create(self): def on_update(self): # noqa C901 from contentcuration.utils.user import calculate_user_storage + original_values = self._field_updates.changed() - blacklist = set([ - "public", - "main_tree_id", - "version", - ]) + blacklist = set( + [ + "public", + "main_tree_id", + "version", + ] + ) - if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)): + if ( + self.main_tree + and original_values + and any((True for field in original_values if field not in blacklist)) + ): # Changing channel metadata should also mark main_tree as changed self.main_tree.changed = True # Check if original thumbnail is no longer referenced - if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]: + if ( + "thumbnail" in original_values + and original_values["thumbnail"] + and "static" not in original_values["thumbnail"] + ): filename, ext = os.path.splitext(original_values["thumbnail"]) delete_empty_file_reference(filename, ext[1:]) @@ -976,7 +1165,9 @@ def on_update(self): # noqa C901 if "deleted" in original_values and not original_values["deleted"]: self.pending_editors.all().delete() # Delete db if channel has been deleted and mark as unpublished - export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id)) + export_db_storage_path = os.path.join( + settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id) + ) if default_storage.exists(export_db_storage_path): default_storage.delete(export_db_storage_path) if self.main_tree: @@ -995,7 +1186,9 @@ def on_update(self): # noqa C901 self.main_tree.save() # if this change affects the published channel list, clear the channel cache - if "public" in original_values and (self.main_tree and self.main_tree.published): + if "public" in original_values and ( + self.main_tree and self.main_tree.published + ): delete_public_channel_cache_keys() def save(self, *args, **kwargs): @@ -1011,19 +1204,33 @@ def save(self, *args, **kwargs): super(Channel, self).save(*args, **kwargs) if creating: - self.history.create(actor_id=self._actor_id, action=channel_history.CREATION) + self.history.create( + actor_id=self._actor_id, action=channel_history.CREATION + ) def get_thumbnail(self): return get_channel_thumbnail(self) def has_changes(self): - return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists() + return ( + self.main_tree.get_descendants(include_self=True) + .filter(changed=True) + .exists() + ) def get_date_modified(self): - return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified'] + return self.main_tree.get_descendants(include_self=True).aggregate( + last_modified=Max("modified") + )["last_modified"] def get_resource_count(self): - return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count() + return ( + self.main_tree.get_descendants() + .exclude(kind_id=content_kinds.TOPIC) + .order_by("content_id") + .distinct("content_id") + .count() + ) def get_human_token(self): return self.secret_tokens.get(is_primary=True) @@ -1032,7 +1239,9 @@ def 
get_channel_id_token(self): return self.secret_tokens.get(token=self.id) def make_token(self): - token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True) + token = self.secret_tokens.create( + token=SecretToken.generate_new_token(), is_primary=True + ) self.secret_tokens.get_or_create(token=self.id) return token @@ -1046,7 +1255,9 @@ def make_public(self, bypass_signals=False): Returns the same channel object. """ if bypass_signals: - self.public = True # set this attribute still, so the object will be updated + self.public = ( + True # set this attribute still, so the object will be updated + ) Channel.objects.filter(id=self.id).update(public=True) # clear the channel cache delete_public_channel_cache_keys() @@ -1070,7 +1281,8 @@ def get_server_rev(self): .with_cte(changes_cte) .filter(applied=True) .values_list("server_rev", flat=True) - .order_by("-server_rev").first() + .order_by("-server_rev") + .first() ) or 0 @property @@ -1089,12 +1301,20 @@ def get_public_channels(cls, defer_nonmain_trees=False): If defer_nonmain_trees is True, defer the loading of all trees except for the main_tree.""" if defer_nonmain_trees: - c = (Channel.objects - .filter(public=True) - .exclude(deleted=True) - .select_related('main_tree') - .prefetch_related('editors') - .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers')) + c = ( + Channel.objects.filter(public=True) + .exclude(deleted=True) + .select_related("main_tree") + .prefetch_related("editors") + .defer( + "trash_tree", + "clipboard_tree", + "staging_tree", + "chef_tree", + "previous_tree", + "viewers", + ) + ) else: c = Channel.objects.filter(public=True).exclude(deleted=True) @@ -1107,9 +1327,7 @@ class Meta: indexes = [ models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME), ] - index_together = [ - ["deleted", "public"] - ] + index_together = [["deleted", "public"]] CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id" @@ -1119,8 +1337,21 @@ class ChannelHistory(models.Model): """ Model for tracking certain actions performed on a channel """ - channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE) - actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE) + + channel = models.ForeignKey( + "Channel", + null=False, + blank=False, + related_name="history", + on_delete=models.CASCADE, + ) + actor = models.ForeignKey( + "User", + null=False, + blank=False, + related_name="channel_history", + on_delete=models.CASCADE, + ) performed = models.DateTimeField(default=timezone.now) action = models.CharField(max_length=50, choices=channel_history.choices) @@ -1130,7 +1361,11 @@ def prune(cls): Prunes history records by keeping the most recent actions for each channel and type, and deleting all other older actions """ - keep_ids = cls.objects.distinct("channel_id", "action").order_by("channel_id", "action", "-performed").values_list("id", flat=True) + keep_ids = ( + cls.objects.distinct("channel_id", "action") + .order_by("channel_id", "action", "-performed") + .values_list("id", flat=True) + ) cls.objects.exclude(id__in=keep_ids).delete() class Meta: @@ -1138,7 +1373,9 @@ class Meta: verbose_name_plural = "Channel histories" indexes = [ - models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME), + models.Index( + fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME + ), ] @@ -1146,7 +1383,14 @@ class 
UserHistory(models.Model): """ Model that stores the user's action history. """ - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name="history", on_delete=models.CASCADE) + + user = models.ForeignKey( + settings.AUTH_USER_MODEL, + null=False, + blank=False, + related_name="history", + on_delete=models.CASCADE, + ) action = models.CharField(max_length=32, choices=user_history.choices) performed_at = models.DateTimeField(default=timezone.now) @@ -1161,19 +1405,29 @@ class ChannelSet(models.Model): public = models.BooleanField(default=False, db_index=True) editors = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='channel_sets', + related_name="channel_sets", verbose_name="editors", help_text="Users with edit rights", blank=True, ) - secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL) + secret_token = models.ForeignKey( + "SecretToken", + null=True, + blank=True, + related_name="channel_sets", + on_delete=models.SET_NULL, + ) @classmethod def filter_edit_queryset(cls, queryset, user): if user.is_anonymous: return queryset.none() user_id = not user.is_anonymous and user.id - edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id"))) + edit = Exists( + User.channel_sets.through.objects.filter( + user_id=user_id, channelset_id=OuterRef("id") + ) + ) queryset = queryset.annotate(edit=edit) if user.is_admin: return queryset @@ -1196,7 +1450,9 @@ def save(self, *args, **kwargs): def on_create(self): if not self.secret_token: - self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token()) + self.secret_token = SecretToken.objects.create( + token=SecretToken.generate_new_token() + ) def delete(self, *args, **kwargs): super(ChannelSet, self).delete(*args, **kwargs) @@ -1208,20 +1464,28 @@ def delete(self, *args, **kwargs): class ContentTag(models.Model): id = UUIDField(primary_key=True, default=uuid.uuid4) tag_name = models.CharField(max_length=50) - channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL) + channel = models.ForeignKey( + "Channel", + related_name="tags", + blank=True, + null=True, + db_index=True, + on_delete=models.SET_NULL, + ) objects = CustomManager() def __str__(self): return self.tag_name class Meta: - unique_together = ['tag_name', 'channel'] + unique_together = ["tag_name", "channel"] class License(models.Model): """ Normalize the license of ContentNode model """ + license_name = models.CharField(max_length=50) license_url = models.URLField(blank=True) license_description = models.TextField(blank=True) @@ -1236,7 +1500,7 @@ class License(models.Model): @classmethod def validate_name(cls, name): if cls.objects.filter(license_name=name).count() == 0: - raise ValidationError('License `{}` does not exist'.format(name)) + raise ValidationError("License `{}` does not exist".format(name)) def __str__(self): return self.license_name @@ -1252,6 +1516,7 @@ class ContentNode(MPTTModel, models.Model): """ By default, all nodes have a title and can be used as a topic. """ + # Random id used internally on Studio (See `node_id` for id used in Kolibri) id = UUIDField(primary_key=True, default=uuid.uuid4) @@ -1260,18 +1525,26 @@ class ContentNode(MPTTModel, models.Model): # interacts with a piece of content, all substantially similar pieces of # content should be marked as such as well. 
We track these "substantially # similar" types of content by having them have the same content_id. - content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True) + content_id = UUIDField( + primary_key=False, default=uuid.uuid4, editable=False, db_index=True + ) # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False) # TODO: disallow nulls once existing models have been set - original_channel_id = UUIDField(primary_key=False, editable=False, null=True, - db_index=True) # Original channel copied from - source_channel_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate channel copied from + original_channel_id = UUIDField( + primary_key=False, editable=False, null=True, db_index=True + ) # Original channel copied from + source_channel_id = UUIDField( + primary_key=False, editable=False, null=True + ) # Immediate channel copied from # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary) - original_source_node_id = UUIDField(primary_key=False, editable=False, null=True, - db_index=True) - source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate node_id of node copied from + original_source_node_id = UUIDField( + primary_key=False, editable=False, null=True, db_index=True + ) + source_node_id = UUIDField( + primary_key=False, editable=False, null=True + ) # Immediate node_id of node copied from # Fields specific to content generated by Ricecooker source_id = models.CharField(max_length=200, blank=True, null=True) @@ -1279,24 +1552,75 @@ class ContentNode(MPTTModel, models.Model): title = models.CharField(max_length=200, blank=True) description = models.TextField(blank=True) - kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL) - license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL) + kind = models.ForeignKey( + "ContentKind", + related_name="contentnodes", + db_index=True, + null=True, + blank=True, + on_delete=models.SET_NULL, + ) + license = models.ForeignKey( + "License", null=True, blank=True, on_delete=models.SET_NULL + ) license_description = models.CharField(max_length=400, null=True, blank=True) - prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of', - through='PrerequisiteContentRelationship', symmetrical=False, blank=True) - is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship', - symmetrical=False, blank=True) - language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL) - parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE) - tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True) + prerequisite = models.ManyToManyField( + "self", + related_name="is_prerequisite_of", + through="PrerequisiteContentRelationship", + symmetrical=False, + blank=True, + ) + is_related = models.ManyToManyField( + "self", + related_name="relate_to", + through="RelatedContentRelationship", + symmetrical=False, + blank=True, + ) + language = models.ForeignKey( + "Language", + null=True, + blank=True, + related_name="content_language", + on_delete=models.SET_NULL, + ) + parent = 
@@ -1279,24 +1552,75 @@ class ContentNode(MPTTModel, models.Model):
     title = models.CharField(max_length=200, blank=True)
     description = models.TextField(blank=True)
-    kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
-    license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)
+    kind = models.ForeignKey(
+        "ContentKind",
+        related_name="contentnodes",
+        db_index=True,
+        null=True,
+        blank=True,
+        on_delete=models.SET_NULL,
+    )
+    license = models.ForeignKey(
+        "License", null=True, blank=True, on_delete=models.SET_NULL
+    )
     license_description = models.CharField(max_length=400, null=True, blank=True)
-    prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',
-                                          through='PrerequisiteContentRelationship', symmetrical=False, blank=True)
-    is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',
-                                        symmetrical=False, blank=True)
-    language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)
-    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
-    tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)
+    prerequisite = models.ManyToManyField(
+        "self",
+        related_name="is_prerequisite_of",
+        through="PrerequisiteContentRelationship",
+        symmetrical=False,
+        blank=True,
+    )
+    is_related = models.ManyToManyField(
+        "self",
+        related_name="relate_to",
+        through="RelatedContentRelationship",
+        symmetrical=False,
+        blank=True,
+    )
+    language = models.ForeignKey(
+        "Language",
+        null=True,
+        blank=True,
+        related_name="content_language",
+        on_delete=models.SET_NULL,
+    )
+    parent = TreeForeignKey(
+        "self",
+        null=True,
+        blank=True,
+        related_name="children",
+        db_index=True,
+        on_delete=models.CASCADE,
+    )
+    tags = models.ManyToManyField(
+        ContentTag, symmetrical=False, related_name="tagged_content", blank=True
+    )

     # No longer used
-    sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order",
-                                   help_text="Ascending, lowest number shown first")
-    copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="",
-                                        help_text="Organization of person who holds the essential rights")
+    sort_order = models.FloatField(
+        max_length=50,
+        default=1,
+        verbose_name="sort order",
+        help_text="Ascending, lowest number shown first",
+    )
+    copyright_holder = models.CharField(
+        max_length=200,
+        null=True,
+        blank=True,
+        default="",
+        help_text="Organization or person who holds the essential rights",
+    )
     # legacy field...
-    original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')
-    cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')
+    original_node = TreeForeignKey(
+        "self",
+        on_delete=models.SET_NULL,
+        null=True,
+        blank=True,
+        related_name="duplicates",
+    )
+    cloned_source = TreeForeignKey(
+        "self", on_delete=models.SET_NULL, null=True, blank=True, related_name="clones"
+    )

     thumbnail_encoding = models.TextField(blank=True, null=True)
@@ -1314,14 +1638,31 @@ class ContentNode(MPTTModel, models.Model):
         - n: n value for M out of N mastery criteria
     """
     extra_fields = JSONField(default=dict, blank=True, null=True)
-    author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?",
-                              null=True)
-    aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?",
-                                  null=True)
-    provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?",
-                                null=True)
-
-    role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)
+    author = models.CharField(
+        max_length=200,
+        blank=True,
+        default="",
+        help_text="Who created this content?",
+        null=True,
+    )
+    aggregator = models.CharField(
+        max_length=200,
+        blank=True,
+        default="",
+        help_text="Who gathered this content together?",
+        null=True,
+    )
+    provider = models.CharField(
+        max_length=200,
+        blank=True,
+        default="",
+        help_text="Who distributed this content?",
+        null=True,
+    )
+
+    role_visibility = models.CharField(
+        max_length=50, choices=roles.choices, default=roles.LEARNER
+    )
     freeze_authoring_data = models.BooleanField(default=False)

     # Fields for metadata labels
@@ -1339,7 +1680,11 @@ class ContentNode(MPTTModel, models.Model):
     # A field for storing a suggested duration for the content node
     # this duration should be in seconds.
-    suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)")
+    suggested_duration = models.IntegerField(
+        blank=True,
+        null=True,
+        help_text="Suggested duration for the content node (in seconds)",
+    )

     objects = CustomContentNodeTreeManager()
@@ -1378,9 +1723,15 @@ def filter_by_pk(cls, pk):
         if tree_id:
             query = query.filter(tree_id=tree_id)
         else:
-            tree_id = ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True).first()
+            tree_id = (
+                ContentNode.objects.filter(pk=pk)
+                .values_list("tree_id", flat=True)
+                .first()
+            )
             if tree_id:
-                cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None)
+                cache.set(
+                    CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None
+                )
                 query = query.filter(tree_id=tree_id)
             else:
                 query = query.none()
@@ -1418,24 +1769,26 @@ def filter_view_queryset(cls, queryset, user):
         )

         if not user_id:
-            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
+            return queryset.annotate(
+                edit=boolean_val(False), view=boolean_val(False)
+            ).filter(public=True)

         edit_cte = PermissionCTE.editable_channels(user_id)
         view_cte = PermissionCTE.view_only_channels(user_id)

-        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
-            edit=edit_cte.exists(cls._permission_filter),
-            view=view_cte.exists(cls._permission_filter),
+        queryset = (
+            queryset.with_cte(edit_cte)
+            .with_cte(view_cte)
+            .annotate(
+                edit=edit_cte.exists(cls._permission_filter),
+                view=view_cte.exists(cls._permission_filter),
+            )
         )

         if user.is_admin:
             return queryset

-        return queryset.filter(
-            Q(view=True)
-            | Q(edit=True)
-            | Q(public=True)
-        )
+        return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))

     @raise_if_unsaved
     def get_root(self):
@@ -1450,12 +1803,12 @@ def get_root_id(self):
         if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
             return self

-        return ContentNode.objects.values_list('pk', flat=True).get(
-            tree_id=self._mpttfield('tree_id'),
+        return ContentNode.objects.values_list("pk", flat=True).get(
+            tree_id=self._mpttfield("tree_id"),
             parent=None,
         )

-    def get_tree_data(self, levels=float('inf')):
+    def get_tree_data(self, levels=float("inf")):
         """
         Returns `levels`-deep tree information starting at current node.

         Args:
@@ -1473,7 +1826,9 @@ def get_tree_data(self, levels=float('inf')):
             }
             children = self.children.all()
             if levels > 0:
-                node_data["children"] = [c.get_tree_data(levels=levels - 1) for c in children]
+                node_data["children"] = [
+                    c.get_tree_data(levels=levels - 1) for c in children
+                ]
             return node_data
         if self.kind_id == content_kinds.EXERCISE:
             return {
@@ -1486,7 +1841,9 @@ def get_tree_data(self, levels=float('inf')):
         return {
             "title": self.title,
             "kind": self.kind_id,
-            "file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],
+            "file_size": self.files.values("file_size").aggregate(
+                size=Sum("file_size")
+            )["size"],
             "node_id": self.node_id,
             "studio_id": self.id,
         }
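As a usage sketch of `get_tree_data` (hedged: `node` is assumed to be a saved topic `ContentNode`, and only the dictionary keys visible in the hunks above are relied on):

```python
# Fetch one level of "lightweight" tree metadata below a topic node.
tree = node.get_tree_data(levels=1)
print(tree["title"], tree["kind"])
for child in tree.get("children", []):
    # Non-topic resources also carry "file_size", "node_id" and "studio_id".
    print("  -", child["title"], child.get("file_size"))
```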
@@ -1494,9 +1851,20 @@ def get_original_node(self):
         original_node = self.original_node or self
         if self.original_channel_id and self.original_source_node_id:
-            original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id
-            original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \
-                ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self
+            original_tree_id = (
+                Channel.objects.select_related("main_tree")
+                .get(pk=self.original_channel_id)
+                .main_tree.tree_id
+            )
+            original_node = (
+                ContentNode.objects.filter(
+                    tree_id=original_tree_id, node_id=self.original_source_node_id
+                ).first()
+                or ContentNode.objects.filter(
+                    tree_id=original_tree_id, content_id=self.content_id
+                ).first()
+                or self
+            )
         return original_node

     def get_associated_presets(self):
@@ -1541,7 +1909,13 @@ def get_channel(self):
             root = self.get_root()
             if not root:
                 return None
-            return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()
+            return Channel.objects.filter(
+                Q(main_tree=root)
+                | Q(chef_tree=root)
+                | Q(trash_tree=root)
+                | Q(staging_tree=root)
+                | Q(previous_tree=root)
+            ).first()
         except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
             return None
@@ -1583,10 +1957,7 @@ def get_details(self, channel=None):
         node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by()

-        descendants = (
-            self.get_descendants()
-            .values("id")
-        )
+        descendants = self.get_descendants().values("id")

         # Get resources
         resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
@@ -1848,21 +2219,24 @@ def get_details(self, channel=None):
     def has_changes(self):
         mptt_opts = self._mptt_meta
         # Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.
-        blacklist = set([
-            'changed',
-            'modified',
-            'publishing',
-            mptt_opts.tree_id_attr,
-            mptt_opts.left_attr,
-            mptt_opts.right_attr,
-            mptt_opts.level_attr,
-        ])
+        blacklist = set(
+            [
+                "changed",
+                "modified",
+                "publishing",
+                mptt_opts.tree_id_attr,
+                mptt_opts.left_attr,
+                mptt_opts.right_attr,
+                mptt_opts.level_attr,
+            ]
+        )
         original_values = self._field_updates.changed()
         return any((True for field in original_values if field not in blacklist))

     def recalculate_editors_storage(self):
         from contentcuration.utils.user import calculate_user_storage
-        for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():
+
+        for editor in self.files.values_list("uploaded_by_id", flat=True).distinct():
             calculate_user_storage(editor)

     def mark_complete(self):  # noqa C901
@@ -1875,37 +2249,58 @@ def mark_complete(self):  # noqa C901
             errors.append("Missing license")
         if self.license and self.license.is_custom and not self.license_description:
             errors.append("Missing license description for custom license")
-        if self.license and self.license.copyright_holder_required and not self.copyright_holder:
+        if (
+            self.license
+            and self.license.copyright_holder_required
+            and not self.copyright_holder
+        ):
             errors.append("Missing required copyright holder")
-        if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists():
+        if (
+            self.kind_id != content_kinds.EXERCISE
+            and not self.files.filter(preset__supplementary=False).exists()
+        ):
             errors.append("Missing default file")
         if self.kind_id == content_kinds.EXERCISE:
             # Check to see if the exercise has at least one assessment item that has:
             if not self.assessment_items.filter(
                 # Item with non-blank raw data
-                ~Q(raw_data="") | (
+                ~Q(raw_data="")
+                | (
                     # A non-blank question
-                    ~Q(question='')
-                    # Non-blank answers
-                    & ~Q(answers='[]')
-                    # With either an input question or one answer marked as correct
-                    & (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'"correct":\s*true'))
+                    ~Q(question="")
+                    # Non-blank answers, unless it is a free response question
+                    # (which is allowed to have no answers)
+                    & (~Q(answers="[]") | Q(type=exercises.FREE_RESPONSE))
+                    # With either an input or free response question or one answer marked as correct
+                    & (
+                        Q(type=exercises.INPUT_QUESTION)
+                        | Q(type=exercises.FREE_RESPONSE)
+                        | Q(answers__iregex=r'"correct":\s*true')
+                    )
                 )
             ).exists():
-                errors.append("No questions with question text and complete answers")
+                errors.append(
+                    "No questions with question text and complete answers"
+                )
             # Check that it has a mastery model set
             # Either check for the previous location for the mastery model, or rely on our completion criteria validation
            # that if it has been set, then it has been set correctly.
-            criterion = self.extra_fields.get("options", {}).get("completion_criteria")
+            criterion = self.extra_fields.get("options", {}).get(
+                "completion_criteria"
+            )
            if not (self.extra_fields.get("mastery_model") or criterion):
                 errors.append("Missing mastery criterion")
             if criterion:
                 try:
-                    completion_criteria.validate(criterion, kind=content_kinds.EXERCISE)
+                    completion_criteria.validate(
+                        criterion, kind=content_kinds.EXERCISE
+                    )
                 except completion_criteria.ValidationError:
                     errors.append("Mastery criterion is defined but is invalid")
         else:
-            criterion = self.extra_fields and self.extra_fields.get("options", {}).get("completion_criteria", {})
+            criterion = self.extra_fields and self.extra_fields.get(
+                "options", {}
+            ).get("completion_criteria", {})
             if criterion:
                 try:
                     completion_criteria.validate(criterion, kind=self.kind_id)
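For orientation, a hedged illustration of the criterion being validated here: the exact schema is defined by `le_utils.constants.completion_criteria`, so the keys below are an assumption based on that library, not a guaranteed contract. An "m of n" mastery criterion stored on an exercise might look like:

```python
# Assumed shape of a completion criterion (per le_utils conventions).
criterion = {
    "model": "mastery",
    "threshold": {"mastery_model": "m_of_n", "m": 3, "n": 5},
}
extra_fields = {"options": {"completion_criteria": criterion}}

# mark_complete() reads extra_fields["options"]["completion_criteria"]
# and passes it to completion_criteria.validate(criterion, kind=...).
```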
@@ -1919,8 +2314,13 @@ def make_content_id_unique(self):
         If self is NOT an original contentnode (in other words, a copied contentnode)
         and a contentnode with same content_id exists then we update self's content_id.
         """
-        is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id
-        node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id)
+        is_node_original = (
+            self.original_source_node_id is None
+            or self.original_source_node_id == self.node_id
+        )
+        node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(
+            content_id=self.content_id
+        )
         if (not is_node_original) and node_same_content_id.exists():
             ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex)
@@ -1947,9 +2347,7 @@ def move_to(self, target, *args, **kwargs):
     def set_default_learning_activity(self):
         if self.learning_activities is None:
             if self.kind in kind_activity_map:
-                self.learning_activities = {
-                    kind_activity_map[self.kind]: True
-                }
+                self.learning_activities = {kind_activity_map[self.kind]: True}

     def save(self, skip_lock=False, *args, **kwargs):
         if self._state.adding:
@@ -1977,15 +2375,21 @@ def save(self, skip_lock=False, *args, **kwargs):
         same_order = old_parent_id == self.parent_id

         if not same_order:
-            changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))
+            changed_ids = list(
+                filter(lambda x: x is not None, set([old_parent_id, self.parent_id]))
+            )
         else:
             changed_ids = []

         if not same_order and not skip_lock:
             # Lock the mptt fields for the trees of the old and new parent
-            with ContentNode.objects.lock_mptt(*ContentNode.objects
-                                               .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])
-                                               .values_list('tree_id', flat=True).distinct()):
+            with ContentNode.objects.lock_mptt(
+                *ContentNode.objects.filter(
+                    id__in=[pid for pid in [old_parent_id, self.parent_id] if pid]
+                )
+                .values_list("tree_id", flat=True)
+                .distinct()
+            ):
                 super(ContentNode, self).save(*args, **kwargs)
                 # Always write to the database for the parent change updates, as we have
                 # no persistent object references for the original and new parent to modify
@@ -2002,7 +2406,7 @@ def save(self, skip_lock=False, *args, **kwargs):
     save.alters_data = True

     def delete(self, *args, **kwargs):
-        parent = self.parent or self._field_updates.changed().get('parent')
+        parent = self.parent or self._field_updates.changed().get("parent")
         if parent:
             parent.changed = True
             parent.save()
@@ -2025,15 +2429,30 @@ def copy_to(
         excluded_descendants=None,
         can_edit_source_channel=None,
         batch_size=None,
-        progress_tracker=None
+        progress_tracker=None,
     ):
-        return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]
+        return self._tree_manager.copy_node(
+            self,
+            target,
+            position,
+            pk,
+            mods,
+            excluded_descendants,
+            can_edit_source_channel,
+            batch_size,
+            progress_tracker,
+        )[0]

     def copy(self):
         return self.copy_to()

     def is_publishable(self):
-        return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists()
+        return (
+            self.complete
+            and self.get_descendants(include_self=True)
+            .exclude(kind_id=content_kinds.TOPIC)
+            .exists()
+        )

     class Meta:
         verbose_name = "Topic"
@@ -2047,14 +2466,18 @@ class Meta:


 class ContentKind(models.Model):
-    kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)
+    kind = models.CharField(
+        primary_key=True, max_length=200, choices=content_kinds.choices
+    )

     def __str__(self):
         return self.kind


 class FileFormat(models.Model):
-    extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)
+    extension = models.CharField(
+        primary_key=True, max_length=40, choices=file_formats.choices
+    )
     mimetype = models.CharField(max_length=200, blank=True)

     def __str__(self):
@@ -2062,7 +2485,9 @@ def __str__(self):


 class FormatPreset(models.Model):
-    id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)
+    id = models.CharField(
+        primary_key=True, max_length=150, choices=format_presets.choices
+    )
     readable_name = models.CharField(max_length=400)
     multi_language = models.BooleanField(default=False)
     supplementary = models.BooleanField(default=False)
@@ -2070,7 +2495,9 @@ class FormatPreset(models.Model):
     subtitle = models.BooleanField(default=False)
     display = models.BooleanField(default=True)  # Render on client side
     order = models.IntegerField(default=0)
-    kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)
+    kind = models.ForeignKey(
+        ContentKind, related_name="format_presets", null=True, on_delete=models.SET_NULL
+    )
     allowed_formats = models.ManyToManyField(FileFormat, blank=True)

     def __str__(self):
@@ -2086,10 +2513,7 @@ def guess_format_preset(cls, filename):
         _, ext = os.path.splitext(filename)
         ext = ext.lstrip(".")

-        f = FormatPreset.objects.filter(
-            allowed_formats__extension=ext,
-            display=True
-        )
+        f = FormatPreset.objects.filter(allowed_formats__extension=ext, display=True)
         return f.first()

     @classmethod
@@ -2111,11 +2535,18 @@ class Language(models.Model):
     lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
     readable_name = models.CharField(max_length=100, blank=True)
     native_name = models.CharField(max_length=100, blank=True)
-    lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
+    lang_direction = models.CharField(
+        max_length=3,
+        choices=languages.LANGUAGE_DIRECTIONS,
+        default=languages.LANGUAGE_DIRECTIONS[0][0],
+    )

     def ietf_name(self):
-        return "{code}-{subcode}".format(code=self.lang_code,
-                                         subcode=self.lang_subcode) if self.lang_subcode else self.lang_code
+        return (
+            "{code}-{subcode}".format(code=self.lang_code, subcode=self.lang_subcode)
+            if self.lang_subcode
+            else self.lang_code
+        )

     def __str__(self):
         return self.ietf_name()
@@ -2125,13 +2556,23 @@ def __str__(self):


 class AssessmentItem(models.Model):
-    type = models.CharField(max_length=50, default="multiplechoice")
+    type = models.CharField(
+        max_length=50,
+        choices=exercises.question_choices + (("true_false", "True/False"),),
+        default=exercises.MULTIPLE_SELECTION,
+    )
     question = models.TextField(blank=True)
     hints = models.TextField(default="[]")
     answers = models.TextField(default="[]")
     order = models.IntegerField(default=1)
-    contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True,
-                                    db_index=True, on_delete=models.CASCADE)
+    contentnode = models.ForeignKey(
+        "ContentNode",
+        related_name="assessment_items",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.CASCADE,
+    )
     # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
     assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
     raw_data = models.TextField(blank=True)
@@ -2151,7 +2592,7 @@ class Meta:
             models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME),
         ]

-        unique_together = ['contentnode', 'assessment_id']
+        unique_together = ["contentnode", "assessment_id"]

     _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id"))
@@ -2186,14 +2627,20 @@ def filter_view_queryset(cls, queryset, user):
         )

         if not user_id:
-            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
+            return queryset.annotate(
+                edit=boolean_val(False), view=boolean_val(False)
+            ).filter(public=True)

         edit_cte = PermissionCTE.editable_channels(user_id)
         view_cte = PermissionCTE.view_only_channels(user_id)

-        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
-            edit=edit_cte.exists(cls._permission_filter),
-            view=view_cte.exists(cls._permission_filter),
+        queryset = (
+            queryset.with_cte(edit_cte)
+            .with_cte(view_cte)
+            .annotate(
+                edit=edit_cte.exists(cls._permission_filter),
+                view=view_cte.exists(cls._permission_filter),
+            )
         )

         if user.is_admin:
@@ -2225,8 +2672,14 @@ def delete(self, *args, **kwargs):


 class SlideshowSlide(models.Model):
-    contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True,
-                                    db_index=True, on_delete=models.CASCADE)
+    contentnode = models.ForeignKey(
+        "ContentNode",
+        related_name="slideshow_slides",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.CASCADE,
+    )
     sort_order = models.FloatField(default=1.0)
     metadata = JSONField(default=dict)
@@ -2235,9 +2688,16 @@ class StagedFile(models.Model):
     """
     Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit
     """
+
     checksum = models.CharField(max_length=400, blank=True, db_index=True)
     file_size = models.IntegerField(blank=True, null=True)
-    uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)
+    uploaded_by = models.ForeignKey(
+        User,
+        related_name="staged_files",
+        blank=True,
+        null=True,
+        on_delete=models.CASCADE,
+    )


 FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx"
@@ -2257,27 +2717,73 @@ class File(models.Model):
     The bottom layer of the contentDB schema, defines the basic building brick for content.
     Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
     """
+
     id = UUIDField(primary_key=True, default=uuid.uuid4)
     checksum = models.CharField(max_length=400, blank=True, db_index=True)
     file_size = models.IntegerField(blank=True, null=True)
-    file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,
-                                    blank=True)
-    contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
-    assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
-    slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
-    file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
-    preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
-    language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
+    file_on_disk = models.FileField(
+        upload_to=object_storage_name,
+        storage=default_storage,
+        max_length=500,
+        blank=True,
+    )
+    contentnode = models.ForeignKey(
+        ContentNode,
+        related_name="files",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.CASCADE,
+    )
+    assessment_item = models.ForeignKey(
+        AssessmentItem,
+        related_name="files",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.CASCADE,
+    )
+    slideshow_slide = models.ForeignKey(
+        SlideshowSlide,
+        related_name="files",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.CASCADE,
+    )
+    file_format = models.ForeignKey(
+        FileFormat,
+        related_name="files",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.SET_NULL,
+    )
+    preset = models.ForeignKey(
+        FormatPreset,
+        related_name="files",
+        blank=True,
+        null=True,
+        db_index=True,
+        on_delete=models.SET_NULL,
+    )
+    language = models.ForeignKey(
+        Language, related_name="files", blank=True, null=True, on_delete=models.SET_NULL
+    )
     original_filename = models.CharField(max_length=255, blank=True)
     source_url = models.CharField(max_length=400, blank=True, null=True)
-    uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
+    uploaded_by = models.ForeignKey(
+        User, related_name="files", blank=True, null=True, on_delete=models.SET_NULL
+    )
     modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True)
     duration = models.IntegerField(blank=True, null=True)

     objects = CustomManager()

-    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id"))
+    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(
+        tree_id=OuterRef("assessment_item__contentnode__tree_id")
+    )

     @classmethod
     def filter_edit_queryset(cls, queryset, user):
@@ -2287,13 +2793,18 @@ def filter_edit_queryset(cls, queryset, user):
             return queryset.none()

         cte = PermissionCTE.editable_channels(user_id)
-        queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))
+        queryset = queryset.with_cte(cte).annotate(
+            edit=cte.exists(cls._permission_filter)
+        )

         if user.is_admin:
             return queryset

         return queryset.filter(
-            Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
+            Q(edit=True)
+            | Q(
+                uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True
+            )
         )

     @classmethod
@@ -2302,22 +2813,34 @@ def filter_view_queryset(cls, queryset, user):
         queryset = queryset.annotate(
             public=Exists(
-                Channel.objects.filter(public=True).filter(
+                Channel.objects.filter(public=True)
+                .filter(
                     Q(main_tree__tree_id=OuterRef("contentnode__tree_id"))
-                    | Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id"))
-                ).values("pk")
+                    | Q(
+                        main_tree__tree_id=OuterRef(
+                            "assessment_item__contentnode__tree_id"
+                        )
+                    )
+                )
+                .values("pk")
             ),
         )

         if not user_id:
-            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
+            return queryset.annotate(
+                edit=boolean_val(False), view=boolean_val(False)
+            ).filter(public=True)

         edit_cte = PermissionCTE.editable_channels(user_id)
         view_cte = PermissionCTE.view_only_channels(user_id)

-        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
-            edit=edit_cte.exists(cls._permission_filter),
-            view=view_cte.exists(cls._permission_filter),
+        queryset = (
+            queryset.with_cte(edit_cte)
+            .with_cte(view_cte)
+            .annotate(
+                edit=edit_cte.exists(cls._permission_filter),
+                view=view_cte.exists(cls._permission_filter),
+            )
         )

         if user.is_admin:
@@ -2327,14 +2850,18 @@ def filter_view_queryset(cls, queryset, user):
             Q(view=True)
             | Q(edit=True)
             | Q(public=True)
-            | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
+            | Q(
+                uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True
+            )
         )

     class Admin:
         pass

     def __str__(self):
-        return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)
+        return "{checksum}{extension}".format(
+            checksum=self.checksum, extension="." + self.file_format.extension
+        )

     def filename(self):
         """
@@ -2374,7 +2901,9 @@ def save(self, set_by_file_on_disk=True, *args, **kwargs):
         if self.file_format_id not in dict(file_formats.choices):
             raise ValidationError("Invalid file_format")

-        if set_by_file_on_disk and self.file_on_disk:  # if file_on_disk is supplied, hash out the file
+        if (
+            set_by_file_on_disk and self.file_on_disk
+        ):  # if file_on_disk is supplied, hash out the file
             if self.checksum is None or self.checksum == "":
                 md5 = hashlib.md5()
                 for chunk in self.file_on_disk.chunks():
@@ -2384,11 +2913,13 @@ def save(self, set_by_file_on_disk=True, *args, **kwargs):
             if not self.file_size:
                 self.file_size = self.file_on_disk.size
             if not self.file_format_id:
-                ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
+                ext = os.path.splitext(self.file_on_disk.name)[1].lstrip(".")
                 if ext in list(dict(file_formats.choices).keys()):
                     self.file_format_id = ext
                 else:
-                    raise ValueError("Files of type `{}` are not supported.".format(ext))
+                    raise ValueError(
+                        "Files of type `{}` are not supported.".format(ext)
+                    )

         super(File, self).save(*args, **kwargs)
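The chunked MD5 logic in `File.save()` above can be summarized in isolation. A minimal sketch (the helper name is ours, not Studio's; `file_field` is assumed to be a Django `FieldFile` exposing `.chunks()`):

```python
import hashlib


def checksum_for(file_field) -> str:
    # Digest the upload in chunks, as File.save() does, so large files
    # never need to be read into memory all at once.
    md5 = hashlib.md5()
    for chunk in file_field.chunks():
        md5.update(chunk)
    return md5.hexdigest()
```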
@@ -2397,15 +2928,20 @@ def save(self, set_by_file_on_disk=True, *args, **kwargs):
     class Meta:
         indexes = [
-            models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),
+            models.Index(
+                fields=["checksum", "file_size"], name=FILE_DISTINCT_INDEX_NAME
+            ),
             models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME),
         ]
         constraints = [
             # enforces that duration is null when not a media preset, but the duration may be null for media presets
             # but if not-null, should be greater than 0
             models.CheckConstraint(
-                check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)),
-                name=FILE_DURATION_CONSTRAINT
+                check=(
+                    Q(preset__in=MEDIA_PRESETS, duration__gt=0)
+                    | Q(duration__isnull=True)
+                ),
+                name=FILE_DURATION_CONSTRAINT,
             )
         ]
@@ -2419,13 +2955,17 @@ def auto_delete_file_on_delete(sender, instance, **kwargs):
     """
     # Recalculate storage
     from contentcuration.utils.user import calculate_user_storage
+
     if instance.uploaded_by_id:
         calculate_user_storage(instance.uploaded_by_id)


 def delete_empty_file_reference(checksum, extension):
-    filename = checksum + '.' + extension
-    if not File.objects.filter(checksum=checksum).exists() and not Channel.objects.filter(thumbnail=filename).exists():
+    filename = checksum + "." + extension
+    if (
+        not File.objects.filter(checksum=checksum).exists()
+        and not Channel.objects.filter(thumbnail=filename).exists()
+    ):
         storage_path = generate_object_storage_name(checksum, filename)
         if default_storage.exists(storage_path):
             default_storage.delete(storage_path)
@@ -2435,22 +2975,33 @@ class PrerequisiteContentRelationship(models.Model):
     """
     Predefine the prerequisite relationship between two ContentNode objects.
     """
-    target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
-    prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
+
+    target_node = models.ForeignKey(
+        ContentNode,
+        related_name="%(app_label)s_%(class)s_target_node",
+        on_delete=models.CASCADE,
+    )
+    prerequisite = models.ForeignKey(
+        ContentNode,
+        related_name="%(app_label)s_%(class)s_prerequisite",
+        on_delete=models.CASCADE,
+    )

     class Meta:
-        unique_together = ['target_node', 'prerequisite']
+        unique_together = ["target_node", "prerequisite"]

     def clean(self, *args, **kwargs):
         # self reference exception
         if self.target_node == self.prerequisite:
-            raise IntegrityError('Cannot self reference as prerequisite.')
+            raise IntegrityError("Cannot self reference as prerequisite.")
         # immediate cyclic exception
-        if PrerequisiteContentRelationship.objects.using(self._state.db) \
-                .filter(target_node=self.prerequisite, prerequisite=self.target_node):
+        if PrerequisiteContentRelationship.objects.using(self._state.db).filter(
+            target_node=self.prerequisite, prerequisite=self.target_node
+        ):
             raise IntegrityError(
-                'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
-                % (self.target_node, self.prerequisite))
+                "Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!"
+                % (self.target_node, self.prerequisite)
+            )
         # distant cyclic exception
         # elif
         #     raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % (
@@ -2463,41 +3014,60 @@ def save(self, *args, **kwargs):
         super(PrerequisiteContentRelationship, self).save(*args, **kwargs)

     def __unicode__(self):
-        return u'%s' % (self.pk)
+        return "%s" % (self.pk)


 class RelatedContentRelationship(models.Model):
     """
     Predefine the related relationship between two ContentNode objects.
     """
-    contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
-    contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
+
+    contentnode_1 = models.ForeignKey(
+        ContentNode, related_name="%(app_label)s_%(class)s_1", on_delete=models.CASCADE
+    )
+    contentnode_2 = models.ForeignKey(
+        ContentNode, related_name="%(app_label)s_%(class)s_2", on_delete=models.CASCADE
+    )

     class Meta:
-        unique_together = ['contentnode_1', 'contentnode_2']
+        unique_together = ["contentnode_1", "contentnode_2"]

     def save(self, *args, **kwargs):
         # self reference exception
         if self.contentnode_1 == self.contentnode_2:
-            raise IntegrityError('Cannot self reference as related.')
+            raise IntegrityError("Cannot self reference as related.")
         # handle immediate cyclic
-        if RelatedContentRelationship.objects.using(self._state.db) \
-                .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1):
+        if RelatedContentRelationship.objects.using(self._state.db).filter(
+            contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1
+        ):
             return  # silently cancel the save
         super(RelatedContentRelationship, self).save(*args, **kwargs)


 class Invitation(models.Model):
     """ Invitation to edit channel """
+
     id = UUIDField(primary_key=True, default=uuid.uuid4)
     accepted = models.BooleanField(default=False)
     declined = models.BooleanField(default=False)
     revoked = models.BooleanField(default=False)
-    invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')
+    invited = models.ForeignKey(
+        settings.AUTH_USER_MODEL,
+        on_delete=models.SET_NULL,
+        null=True,
+        related_name="sent_to",
+    )
     share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
     email = models.EmailField(max_length=100, null=True)
-    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)
-    channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)
+    sender = models.ForeignKey(
+        settings.AUTH_USER_MODEL,
+        related_name="sent_by",
+        null=True,
+        on_delete=models.CASCADE,
+    )
+    channel = models.ForeignKey(
+        "Channel", null=True, related_name="pending_editors", on_delete=models.CASCADE
+    )
     first_name = models.CharField(max_length=100, blank=True)
     last_name = models.CharField(max_length=100, blank=True, null=True)
@@ -2525,9 +3095,7 @@ def filter_edit_queryset(cls, queryset, user):
             return queryset

         return queryset.filter(
-            Q(email__iexact=user.email)
-            | Q(sender=user)
-            | Q(channel__editors=user)
+            Q(email__iexact=user.email) | Q(sender=user) | Q(channel__editors=user)
         ).distinct()

     @classmethod
@@ -2551,22 +3119,38 @@ class Change(models.Model):
     # so that we can validate they have permissions to do so
     # allow to be null so that we don't lose changes if a user
     # account is hard deleted.
-    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name="changes_by_user")
+    created_by = models.ForeignKey(
+        settings.AUTH_USER_MODEL,
+        null=True,
+        blank=True,
+        on_delete=models.SET_NULL,
+        related_name="changes_by_user",
+    )
     # Almost all changes are related to channels, but some are specific only to users
     # so we allow this to be nullable for these edge cases.
     # Indexed by default because it's a ForeignKey field.
-    channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE)
+    channel = models.ForeignKey(
+        Channel, null=True, blank=True, on_delete=models.CASCADE
+    )
     # For those changes related to users, store a user value instead of channel
     # this may be different to created_by, as changes to invitations affect individual users.
     # Indexed by default because it's a ForeignKey field.
-    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name="changes_about_user")
+    user = models.ForeignKey(
+        settings.AUTH_USER_MODEL,
+        null=True,
+        blank=True,
+        on_delete=models.CASCADE,
+        related_name="changes_about_user",
+    )
     # Use client_rev to keep track of changes coming from the client side
     # but let it be blank or null for changes we generate on the server side
     client_rev = models.IntegerField(null=True, blank=True)
     # client_rev numbers are by session, we add the session key here for bookkeeping
     # to allow a check within the same session to return whether a change has been applied
     # or not, and hence remove it from the frontend
-    session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL)
+    session = models.ForeignKey(
+        Session, null=True, blank=True, on_delete=models.SET_NULL
+    )
     table = models.CharField(max_length=32)
     change_type = models.IntegerField()
     # Use the DRF JSONEncoder class as the encoder here
@@ -2601,12 +3185,20 @@ def _create_from_change(
     ):
         change_type = data.pop("type")
         if table is None or table not in ALL_TABLES:
-            raise TypeError("table is a required argument for creating changes and must be a valid table name")
+            raise TypeError(
+                "table is a required argument for creating changes and must be a valid table name"
+            )
         if change_type is None or change_type not in ALL_CHANGES:
-            raise TypeError("change_type is a required argument for creating changes and must be a valid change type integer")
+            raise TypeError(
+                "change_type is a required argument for creating changes and must be a valid change type integer"
+            )
         # Don't let someone mark a change as unpublishable if it's not in the list of tables that make changes that we can publish
         # also, by definition, publishing is not a publishable change - this probably doesn't matter, but making sense is nice.
-        unpublishable = unpublishable or table not in PUBLISHABLE_CHANGE_TABLES or change_type == PUBLISHED
+        unpublishable = (
+            unpublishable
+            or table not in PUBLISHABLE_CHANGE_TABLES
+            or change_type == PUBLISHED
+        )
         return cls(
             session_id=session_key,
             created_by_id=created_by_id,
@@ -2621,7 +3213,14 @@ def _create_from_change(
         )

     @classmethod
-    def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False, unpublishable=False):
+    def create_changes(
+        cls,
+        changes,
+        created_by_id=None,
+        session_key=None,
+        applied=False,
+        unpublishable=False,
+    ):
         change_models = []
         for change in changes:
             change_models.append(
@@ -2630,7 +3229,7 @@ def create_changes(cls, changes, created_by_id=None, session_key=None, applied=F
                     session_key=session_key,
                     applied=applied,
                     unpublishable=unpublishable,
-                    **change
+                    **change,
                 )
             )
@@ -2638,22 +3237,37 @@ def create_changes(cls, changes, created_by_id=None, session_key=None, applied=F
         return change_models

     @classmethod
-    def create_change(cls, change, created_by_id=None, session_key=None, applied=False, unpublishable=False):
-        obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, unpublishable=unpublishable, **change)
+    def create_change(
+        cls,
+        change,
+        created_by_id=None,
+        session_key=None,
+        applied=False,
+        unpublishable=False,
+    ):
+        obj = cls._create_from_change(
+            created_by_id=created_by_id,
+            session_key=session_key,
+            applied=applied,
+            unpublishable=unpublishable,
+            **change,
+        )
         obj.save()
         return obj

     @classmethod
     def serialize(cls, change):
         datum = get_attribute(change, ["kwargs"]).copy()
-        datum.update({
-            "server_rev": get_attribute(change, ["server_rev"]),
-            "table": get_attribute(change, ["table"]),
-            "type": get_attribute(change, ["change_type"]),
-            "channel_id": get_attribute(change, ["channel_id"]),
-            "user_id": get_attribute(change, ["user_id"]),
-            "created_by_id": get_attribute(change, ["created_by_id"])
-        })
+        datum.update(
+            {
+                "server_rev": get_attribute(change, ["server_rev"]),
+                "table": get_attribute(change, ["table"]),
+                "type": get_attribute(change, ["change_type"]),
+                "channel_id": get_attribute(change, ["channel_id"]),
+                "user_id": get_attribute(change, ["user_id"]),
+                "created_by_id": get_attribute(change, ["created_by_id"]),
+            }
+        )
         return datum

     def serialize_to_change_dict(self):
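A hedged sketch of how `create_changes` might be driven: per `_create_from_change` above, only `"table"` and `"type"` are required and validated against `ALL_TABLES` / `ALL_CHANGES`; the specific table name, change-type value, and extra keys below are illustrative assumptions, not Studio's documented payload.

```python
# Hypothetical change payload; everything besides "table" and "type"
# lands in Change.kwargs, mirroring serialize() above.
changes = [
    {
        "table": "contentnode",          # assumed to be a valid ALL_TABLES entry
        "type": 2,                       # assumed to be a valid ALL_CHANGES value
        "key": "abc123",                 # illustrative row identifier
        "mods": {"title": "New title"},  # illustrative modification payload
    }
]
Change.create_changes(changes, created_by_id=user_id, applied=True)
```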
+ name="task_result_signature", ), ] @@ -2697,13 +3320,13 @@ class BaseFeedback(models.Model): # time_shown: timestamp of when the recommendations are first shown created_at = models.DateTimeField(auto_now_add=True) - # for RecommendationsEvent class conntentnode_id represents: + # for RecommendationsEvent class contentnode_id represents: # target_topic_id that the ID of the topic the user # initiated the import from (where the imported content will go) # # for ReccomendationsInteractionEvent class contentnode_id represents: # contentNode_id of one of the item being interacted with - # (this must correspond to one of the items in the “content” array on the RecommendationEvent) + # (this must correspond to one of the items in the "content" array on the RecommendationEvent) # # for RecommendationsFlaggedEvent class contentnode_id represents: # contentnode_id of the content that is being flagged. @@ -2727,7 +3350,9 @@ class Meta: class BaseFeedbackInteractionEvent(models.Model): - feedback_type = models.CharField(max_length=50, choices=feedback.FEEDBACK_TYPE_CHOICES) + feedback_type = models.CharField( + max_length=50, choices=feedback.FEEDBACK_TYPE_CHOICES + ) feedback_reason = models.TextField(max_length=1500) class Meta: @@ -2744,6 +3369,6 @@ class RecommendationsInteractionEvent(BaseFeedback, BaseFeedbackInteractionEvent class RecommendationsEvent(BaseFeedback, BaseFeedbackEvent): # timestamp of when the user navigated away from the recommendation list - time_hidden = models.DateTimeField() + time_hidden = models.DateTimeField(null=True, blank=True) # A list of JSON blobs, representing the content items in the list of recommendations. content = models.JSONField(default=list) diff --git a/contentcuration/contentcuration/node_metadata/cte.py b/contentcuration/contentcuration/node_metadata/cte.py index 36b0241635..29154710ac 100644 --- a/contentcuration/contentcuration/node_metadata/cte.py +++ b/contentcuration/contentcuration/node_metadata/cte.py @@ -34,10 +34,10 @@ def get(self): return self.cte def build(self): - raise NotImplementedError('Build method must create CTE') + raise NotImplementedError("Build method must create CTE") def join(self, query): - raise NotImplementedError('Join method must join query with CTE') + raise NotImplementedError("Join method must join query with CTE") @property def col(self): @@ -47,17 +47,19 @@ def col(self): class LeftContentCTE(MetadataCTE): def join(self, query): cte = self.get() - return cte.join(query, content_id=cte.col.content_id, _join_type=LOUTER).with_cte(cte) + return cte.join( + query, content_id=cte.col.content_id, _join_type=LOUTER + ).with_cte(cte) class TreeMetadataCTE(MetadataCTE): - columns = ['tree_id'] + columns = ["tree_id"] def build(self): - tree_ids = self.query.values('tree_id') + tree_ids = self.query.values("tree_id") return With( ContentNode.objects.filter(tree_id__in=tree_ids).values(*set(self.columns)), - name='tree_cte' + name="tree_cte", ) def join(self, query): @@ -66,13 +68,14 @@ def join(self, query): class AssessmentCountCTE(LeftContentCTE): - columns = ['content_id'] + columns = ["content_id"] def build(self): - q = self.query.filter(kind_id=content_kinds.EXERCISE, assessment_items__deleted=False)\ - .annotate(assessment_count=Count(F('assessment_items__id'), distinct=True)) + q = self.query.filter( + kind_id=content_kinds.EXERCISE, assessment_items__deleted=False + ).annotate(assessment_count=Count(F("assessment_items__id"), distinct=True)) - return With(q.values(*set(self.columns)), name='assessment_count_cte') + 
return With(q.values(*set(self.columns)), name="assessment_count_cte") class FileMetadataCTE(LeftContentCTE): @@ -83,19 +86,22 @@ def build(self): columns = set(self.columns) files = nodes.values( - 'content_id', - **{column: F('files__{}'.format(column)) for column in columns} + "content_id", + **{column: F("files__{}".format(column)) for column in columns} ).distinct() assessment_files = nodes.values( - 'content_id', - **{column: F('assessment_items__files__{}'.format(column)) for column in columns} + "content_id", + **{ + column: F("assessment_items__files__{}".format(column)) + for column in columns + } ).distinct() - return With(files.union(assessment_files).values(*columns), name='file_cte') + return With(files.union(assessment_files).values(*columns), name="file_cte") class ResourceSizeCTE(LeftContentCTE): - columns = ['content_id'] + columns = ["content_id"] def build(self): """ @@ -103,20 +109,24 @@ def build(self): file records would produce incorrect result for resource sizes due to summing. """ files_cte = FileMetadataCTE(self.query) - files_cte.add_columns(('file_size', 'checksum')) + files_cte.add_columns(("file_size", "checksum")) - resource_condition = BooleanComparison(F('kind_id'), '!=', Value(content_kinds.TOPIC)) + resource_condition = BooleanComparison( + F("kind_id"), "!=", Value(content_kinds.TOPIC) + ) - q = files_cte.join(self.query).annotate(resource_size=Sum( - Case( - # aggregate file_size when selected node is not a topic - When( - condition=WhenQ(resource_condition), - then=Coalesce(files_cte.col.file_size, Value(0)), + q = files_cte.join(self.query).annotate( + resource_size=Sum( + Case( + # aggregate file_size when selected node is not a topic + When( + condition=WhenQ(resource_condition), + then=Coalesce(files_cte.col.file_size, Value(0)), + ), + default=Value(0), ), - default=Value(0) - ), - output_field=IntegerField() - )) + output_field=IntegerField(), + ) + ) - return With(q.values(*set(self.columns)), name='resource_size_cte') + return With(q.values(*set(self.columns)), name="resource_size_cte") diff --git a/contentcuration/contentcuration/node_metadata/query.py b/contentcuration/contentcuration/node_metadata/query.py index eb544a1658..221e80f341 100644 --- a/contentcuration/contentcuration/node_metadata/query.py +++ b/contentcuration/contentcuration/node_metadata/query.py @@ -50,7 +50,9 @@ def get(self, node_pk): :return: A dict of metadata for the node identified by `node_pk` """ if self.query is None: - return Metadata(ContentNode.filter_by_pk(pk=node_pk), **self.annotations).get(node_pk) + return Metadata( + ContentNode.filter_by_pk(pk=node_pk), **self.annotations + ).get(node_pk) if self.metadata is None: self.metadata = {} @@ -58,7 +60,7 @@ def get(self, node_pk): # Finally, clear ordering (MPTT adds ordering by default) for row in query: - self.metadata.update({row.pop('id'): row}) + self.metadata.update({row.pop("id"): row}) return self.metadata.get(node_pk) @@ -67,7 +69,7 @@ def build(self): :return: A complete queryset to return the metadata """ if len(self.annotations) == 0: - raise ValueError('No metadata to retrieve') + raise ValueError("No metadata to retrieve") ctes = [] @@ -90,19 +92,26 @@ def build(self): if len(ctes) > 0: for cte in ctes: query = cte.join(query) - annotations.update({ - field_name: annotation.get_annotation(cte) - for field_name, annotation in self.annotations.items() - if isinstance(annotation, MetadataAnnotation) - and annotation.cte and isinstance(cte, annotation.cte) - }) - - annotations.update(**{ - field_name: 
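The docstring's point about naive summing is easiest to see with plain numbers; a small illustration of why the CTE deduplicates file rows (via distinct checksum/file_size) before aggregating:

```python
# (checksum, file_size) pairs: the same physical file can back many File rows.
rows = [
    ("abc123", 1000),
    ("abc123", 1000),  # same stored file referenced by a second node
    ("def456", 2500),
]
naive_total = sum(size for _, size in rows)         # 4500: double counts
deduped_total = sum(size for _, size in set(rows))  # 3500: one per stored file
print(naive_total, deduped_total)
```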
diff --git a/contentcuration/contentcuration/node_metadata/query.py b/contentcuration/contentcuration/node_metadata/query.py
index eb544a1658..221e80f341 100644
--- a/contentcuration/contentcuration/node_metadata/query.py
+++ b/contentcuration/contentcuration/node_metadata/query.py
@@ -50,7 +50,9 @@ def get(self, node_pk):
         :return: A dict of metadata for the node identified by `node_pk`
         """
         if self.query is None:
-            return Metadata(ContentNode.filter_by_pk(pk=node_pk), **self.annotations).get(node_pk)
+            return Metadata(
+                ContentNode.filter_by_pk(pk=node_pk), **self.annotations
+            ).get(node_pk)

         if self.metadata is None:
             self.metadata = {}
@@ -58,7 +60,7 @@ def get(self, node_pk):
             # Finally, clear ordering (MPTT adds ordering by default)
             for row in query:
-                self.metadata.update({row.pop('id'): row})
+                self.metadata.update({row.pop("id"): row})

         return self.metadata.get(node_pk)
@@ -67,7 +69,7 @@ def build(self):
         :return: A complete queryset to return the metadata
         """
         if len(self.annotations) == 0:
-            raise ValueError('No metadata to retrieve')
+            raise ValueError("No metadata to retrieve")

         ctes = []
@@ -90,19 +92,26 @@ def build(self):
         if len(ctes) > 0:
             for cte in ctes:
                 query = cte.join(query)
-                annotations.update({
-                    field_name: annotation.get_annotation(cte)
-                    for field_name, annotation in self.annotations.items()
-                    if isinstance(annotation, MetadataAnnotation)
-                    and annotation.cte and isinstance(cte, annotation.cte)
-                })
-
-        annotations.update(**{
-            field_name: annotation.get_annotation(None)
-            if isinstance(annotation, MetadataAnnotation) else annotation
-            for field_name, annotation in self.annotations.items()
-            if not isinstance(annotation, MetadataAnnotation) or annotation.cte is None
-        })
+                annotations.update(
+                    {
+                        field_name: annotation.get_annotation(cte)
+                        for field_name, annotation in self.annotations.items()
+                        if isinstance(annotation, MetadataAnnotation)
+                        and annotation.cte
+                        and isinstance(cte, annotation.cte)
+                    }
+                )
+
+        annotations.update(
+            **{
+                field_name: annotation.get_annotation(None)
+                if isinstance(annotation, MetadataAnnotation)
+                else annotation
+                for field_name, annotation in self.annotations.items()
+                if not isinstance(annotation, MetadataAnnotation)
+                or annotation.cte is None
+            }
+        )

         # Finally, clear ordering (MPTT adds ordering by default)
-        return query.values('id').annotate(**annotations).order_by()
+        return query.values("id").annotate(**annotations).order_by()
diff --git a/contentcuration/contentcuration/not_production_settings.py b/contentcuration/contentcuration/not_production_settings.py
index e98410433d..afcc6460bc 100644
--- a/contentcuration/contentcuration/not_production_settings.py
+++ b/contentcuration/contentcuration/not_production_settings.py
@@ -3,8 +3,8 @@
 ALLOWED_HOSTS = ["studio.local", "192.168.31.9", "127.0.0.1", "*"]
 ACCOUNT_ACTIVATION_DAYS = 7
-EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
-POSTMARK_API_KEY = 'POSTMARK_API_TEST'
+EMAIL_BACKEND = "postmark.django_backend.EmailBackend"
+POSTMARK_API_KEY = "POSTMARK_API_TEST"
 POSTMARK_TEST_MODE = True

 SITE_ID = 2
@@ -19,3 +19,6 @@
 )

 AWS_AUTO_CREATE_BUCKET = True
+
+# Use local instance for curriculum automation for development
+CURRICULUM_AUTOMATION_API_URL = "http://localhost:8000"
diff --git a/contentcuration/contentcuration/perftools/objective.py b/contentcuration/contentcuration/perftools/objective.py
index 8777e5a089..0405be92d5 100644
--- a/contentcuration/contentcuration/perftools/objective.py
+++ b/contentcuration/contentcuration/perftools/objective.py
@@ -1,8 +1,12 @@
 import sys
-# TODO: Investigate more precise timing libraries
 import time

-from contentcuration.models import ContentKind, ContentNode, File
+from contentcuration.models import ContentKind
+from contentcuration.models import ContentNode
+from contentcuration.models import File
+
+# TODO: Investigate more precise timing libraries
+

 def print_progress(text):
     sys.stdout.write("\r" + text)
@@ -16,15 +20,21 @@ class Objective:
     """

     def __init__(self):
-        self.topic, topic_created = ContentKind.objects.get_or_create(kind='Topic')
-        self.root_node = ContentNode.objects.create(title='test_server_perf Root Node', kind=self.topic)
+        self.topic, topic_created = ContentKind.objects.get_or_create(kind="Topic")
+        self.root_node = ContentNode.objects.create(
+            title="test_server_perf Root Node", kind=self.topic
+        )

     def __del__(self):
         if self.root_node:
-            raise Exception("Test cleanup not run. Ensure you manually delete root node with id {} and all nodes and files that are connected to it.".format(self.root_node.pk))
+            raise Exception(
+                "Test cleanup not run. Ensure you manually delete root node with id {} and all nodes and files that are connected to it.".format(
+                    self.root_node.pk
+                )
+            )

     def cleanup(self):
-        print("Performing clean up, please wait...")
+        print("Performing clean up, please wait...")  # noqa: T201
         try:
             if self.root_node:
                 files = File.objects.filter(contentnode=self.root_node)
@@ -33,9 +43,13 @@ def cleanup(self):
                 self.root_node.delete()
                 self.root_node = None
-        except Exception as e:
+        except Exception:
             if self.root_node:
-                print("Error in cleanup. Root node with id {} may still exist.".format(self.root_node.pk))
+                print(  # noqa: T201
+                    "Error in cleanup. Root node with id {} may still exist.".format(
+                        self.root_node.pk
+                    )
+                )
             raise

     def create_content_nodes(self, num_nodes=100):
@@ -52,11 +66,15 @@ def create_content_nodes(self, num_nodes=100):
         start = time.time()
         for i in range(num_nodes):
-            node = ContentNode.objects.create(title="test_server_perf Node {}".format(i), parent=parent, kind=self.topic)
+            node = ContentNode.objects.create(
+                title="test_server_perf Node {}".format(i),
+                parent=parent,
+                kind=self.topic,
+            )
             # try to create a multi-level tree structure to better test tree recalc operations
             if num_nodes > 20:
                 if i % (num_nodes / 10) == 0:
-                    sys.stdout.write('.')
+                    sys.stdout.write(".")
                     sys.stdout.flush()
                     parent = node
@@ -76,7 +94,7 @@ def create_files(self, num_files=100):
         start = time.time()
         for i in range(num_files):
-            file_obj = File.objects.create()
+            _ = File.objects.create()
         elapsed = time.time() - start

         if File.objects.count() != current_files + num_files:
@@ -100,7 +118,11 @@ def get_object_creation_stats(self, object_type, num_objects=100, num_runs=10):
         run_times = []
         for i in range(num_runs):
-            print_progress("Creating {} {} objects. Test run {} of {}".format(num_objects, object_type, i+1, num_runs))
+            print_progress(
+                "Creating {} {} objects. Test run {} of {}".format(
+                    num_objects, object_type, i + 1, num_runs
+                )
+            )
             run_times.append(creation_func(num_objects))

         return self._calc_stats(run_times, num_objects)
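For orientation, a sketch of a benchmarking session with this helper (hedged: the `"ContentNode"` key is an assumption about how `object_type` is dispatched internally; the stats keys come from `_calc_stats` below):

```python
objective = Objective()
try:
    stats = objective.get_object_creation_stats(
        "ContentNode", num_objects=100, num_runs=10
    )
    print(stats["average"], stats["per_record_average"])
finally:
    # cleanup() is mandatory; otherwise __del__ raises to flag leftover test data.
    objective.cleanup()
```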
Test run {} of {}".format( + num_objects, "ContentNode", i + 1, num_runs + ) + ) with ContentNode.objects.delay_mptt_updates(): run_times.append(self.create_content_nodes(num_objects)) @@ -128,8 +154,10 @@ def get_large_channel_creation_stats(self): num_files = num_nodes * 3 stats = {} - stats['Node creation time'] = self.get_object_creation_stats_mptt_delay(num_nodes, num_runs=1)['min'] - stats['File creation time'] = self.create_files(num_files) + stats["Node creation time"] = self.get_object_creation_stats_mptt_delay( + num_nodes, num_runs=1 + )["min"] + stats["File creation time"] = self.create_files(num_files) return stats @@ -141,8 +169,8 @@ def _calc_stats(self, run_times, num_items): average = total_time / len(run_times) return { - 'min': run_times[0], - 'max': run_times[-1], - 'average': average, - 'per_record_average': average / num_items + "min": run_times[0], + "max": run_times[-1], + "average": average, + "per_record_average": average / num_items, } diff --git a/contentcuration/contentcuration/production_settings.py b/contentcuration/contentcuration/production_settings.py index 1d0a7d456d..a00bf43a41 100644 --- a/contentcuration/contentcuration/production_settings.py +++ b/contentcuration/contentcuration/production_settings.py @@ -1,7 +1,5 @@ # flake8: noqa # ignore the entire file in general, since we do a lot of overrides here which break pep8 compat -from __future__ import absolute_import - from . import settings as base_settings from .settings import * # noqa from contentcuration.utils.secretmanagement import get_secret @@ -12,11 +10,11 @@ MEDIA_ROOT = base_settings.STORAGE_ROOT -DEFAULT_FILE_STORAGE = 'contentcuration.utils.gcs_storage.CompositeGCS' +DEFAULT_FILE_STORAGE = "contentcuration.utils.gcs_storage.CompositeGCS" SESSION_ENGINE = "django.contrib.sessions.backends.db" # email settings -EMAIL_BACKEND = 'postmark.django_backend.EmailBackend' +EMAIL_BACKEND = "postmark.django_backend.EmailBackend" POSTMARK_API_KEY = get_secret("EMAIL_CREDENTIALS_POSTMARK_API_KEY") LANGUAGE_CODE = get_secret("LANGUAGE_CODE") or "en" @@ -24,22 +22,22 @@ # Google drive settings GOOGLE_STORAGE_REQUEST_SHEET = "1uC1nsJPx_5g6pQT6ay0qciUVya0zUFJ8wIwbsTEh60Y" GOOGLE_FEEDBACK_SHEET = "1aPQ9_zMJgNAMf0Oqr26NChzwSEJz6oQHuPCPKmNRFRQ" -GOOGLE_AUTH_JSON = get_secret("GOOGLE_DRIVE_AUTH_JSON") or base_settings.GOOGLE_AUTH_JSON +GOOGLE_AUTH_JSON = ( + get_secret("GOOGLE_DRIVE_AUTH_JSON") or base_settings.GOOGLE_AUTH_JSON +) # Activate django-prometheus -INSTALLED_APPS = INSTALLED_APPS + ( - "django_prometheus", -) +INSTALLED_APPS = INSTALLED_APPS + ("django_prometheus",) MIDDLEWARE = ( - ("django_prometheus.middleware.PrometheusBeforeMiddleware",) + - MIDDLEWARE + - ("django_prometheus.middleware.PrometheusAfterMiddleware",) + ("django_prometheus.middleware.PrometheusBeforeMiddleware",) + + MIDDLEWARE + + ("django_prometheus.middleware.PrometheusAfterMiddleware",) ) CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.redis.RedisCache" if SITE_READ_ONLY: - CACHES['default']['BACKEND'] = "django_prometheus.cache.backends.locmem.LocMemCache" + CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.locmem.LocMemCache" DATABASES["default"]["ENGINE"] = "django_prometheus.db.backends.postgresql" diff --git a/contentcuration/contentcuration/profile_settings.py b/contentcuration/contentcuration/profile_settings.py deleted file mode 100644 index 7b0e35b389..0000000000 --- a/contentcuration/contentcuration/profile_settings.py +++ /dev/null @@ -1,3 +0,0 @@ -from .not_production_settings 
import * # noqa - -MIDDLEWARE = ("whitenoise.middleware.WhiteNoiseMiddleware",) + MIDDLEWARE # noqa diff --git a/contentcuration/contentcuration/ricecooker_versions.py b/contentcuration/contentcuration/ricecooker_versions.py index 97716a65b9..8f6285bb9d 100644 --- a/contentcuration/contentcuration/ricecooker_versions.py +++ b/contentcuration/contentcuration/ricecooker_versions.py @@ -1,14 +1,9 @@ -from future import standard_library - -standard_library.install_aliases() - - """ Latest ricecooker version Any version >= VERSION_OK will get a message that the version is "up to date" (log level = info) """ -VERSION_OK = "0.6.32" # this gets overwritten to current v. after XML RPC call +VERSION_OK = "0.7.3" VERSION_OK_MESSAGE = "Ricecooker v{} is up-to-date." """ @@ -16,7 +11,7 @@ Any version < VERSION_OK and >= VERSION_SOFT_WARNING will get a recommendation to upgrade before running (log level = warning) """ -VERSION_SOFT_WARNING = "0.6.30" +VERSION_SOFT_WARNING = "0.7.0" VERSION_SOFT_WARNING_MESSAGE = ( "You are using Ricecooker v{}, however v{} is available. " "You should consider upgrading your Ricecooker." diff --git a/contentcuration/contentcuration/serializers.py b/contentcuration/contentcuration/serializers.py index 7b39963c0f..c1a6082402 100644 --- a/contentcuration/contentcuration/serializers.py +++ b/contentcuration/contentcuration/serializers.py @@ -26,14 +26,19 @@ class PublicChannelSerializer(serializers.ModelSerializer): """ Called by the public API, primarily used by Kolibri. Contains information more specific to Kolibri's needs. """ - kind_count = serializers.SerializerMethodField('generate_kind_count') - matching_tokens = serializers.SerializerMethodField('match_tokens') - icon_encoding = serializers.SerializerMethodField('get_thumbnail_encoding') - version_notes = serializers.SerializerMethodField('sort_published_data') + + kind_count = serializers.SerializerMethodField("generate_kind_count") + matching_tokens = serializers.SerializerMethodField("match_tokens") + icon_encoding = serializers.SerializerMethodField("get_thumbnail_encoding") + version_notes = serializers.SerializerMethodField("sort_published_data") def match_tokens(self, channel): - tokens = json.loads(channel.tokens) if hasattr(channel, 'tokens') else [] - return list(channel.secret_tokens.filter(token__in=tokens).values_list('token', flat=True)) + tokens = json.loads(channel.tokens) if hasattr(channel, "tokens") else [] + return list( + channel.secret_tokens.filter(token__in=tokens).values_list( + "token", flat=True + ) + ) def get_thumbnail_encoding(self, channel): """ @@ -44,7 +49,7 @@ def get_thumbnail_encoding(self, channel): if channel.icon_encoding: return channel.icon_encoding if channel.thumbnail_encoding: - base64 = channel.thumbnail_encoding.get('base64') + base64 = channel.thumbnail_encoding.get("base64") if base64: return base64 @@ -54,14 +59,27 @@ def generate_kind_count(self, channel): return channel.published_kind_count and json.loads(channel.published_kind_count) def sort_published_data(self, channel): - data = {int(k): v['version_notes'] for k, v in channel.published_data.items()} + data = {int(k): v["version_notes"] for k, v in channel.published_data.items()} return OrderedDict(sorted(data.items())) class Meta: model = Channel - fields = ('id', 'name', 'language', 'included_languages', 'description', 'total_resource_count', 'version', - 'kind_count', 'published_size', 'last_published', 'icon_encoding', 'matching_tokens', 'public', - 'version_notes') + fields = ( + "id", + "name", + "language", 
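These thresholds are compared against the Ricecooker version reported when a chef run connects; the actual check lives elsewhere in Studio, but a hedged sketch of the intended gating (the numeric tuple comparison and the final fallback string are our assumptions) looks like:

```python
def version_status(version: str) -> str:
    def parse(v: str):
        # "0.7.3" -> (0, 7, 3) for numeric comparison
        return tuple(int(part) for part in v.split("."))

    if parse(version) >= parse(VERSION_OK):
        return VERSION_OK_MESSAGE.format(version)
    if parse(version) >= parse(VERSION_SOFT_WARNING):
        return VERSION_SOFT_WARNING_MESSAGE.format(version, VERSION_OK)
    # Older versions fall through to the stronger warnings defined later
    # in this module (not shown in this hunk).
    return "Ricecooker v{} is outdated.".format(version)
```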
+ "included_languages", + "description", + "total_resource_count", + "version", + "kind_count", + "published_size", + "last_published", + "icon_encoding", + "matching_tokens", + "public", + "version_notes", + ) class SimplifiedChannelProbeCheckSerializer(serializers.ModelSerializer): @@ -69,13 +87,14 @@ class SimplifiedChannelProbeCheckSerializer(serializers.ModelSerializer): class Meta: model = Channel - fields = ('id', 'name', 'description', 'thumbnail', 'main_tree') + fields = ("id", "name", "description", "thumbnail", "main_tree") class GetTreeDataSerializer(serializers.Serializer): """ Used by get_*_tree_data endpoints to ontain "lightweight" tree data. """ + channel_id = serializers.CharField(required=True) - tree = serializers.CharField(required=False, default='main') + tree = serializers.CharField(required=False, default="main") node_id = serializers.CharField(required=False) diff --git a/contentcuration/contentcuration/settings.py b/contentcuration/contentcuration/settings.py index 595ee834fb..0f18ed0131 100644 --- a/contentcuration/contentcuration/settings.py +++ b/contentcuration/contentcuration/settings.py @@ -15,7 +15,6 @@ import re import sys from datetime import timedelta -from tempfile import gettempdir from django.utils.timezone import now @@ -36,29 +35,34 @@ EXPORT_ROOT = "exports" BETA_MODE = os.getenv("STUDIO_BETA_MODE") -RUNNING_TESTS = (sys.argv[1:2] == ['test'] or os.path.basename(sys.argv[0]) == 'pytest') +RUNNING_TESTS = sys.argv[1:2] == ["test"] or os.path.basename(sys.argv[0]) == "pytest" # hardcoding all this info for now. Potential for shared reference with webpack? WEBPACK_LOADER = { - 'DEFAULT': { + "DEFAULT": { # trailing empty string to include trailing / - 'BUNDLE_DIR_NAME': os.path.join('studio', ''), - 'STATS_FILE': os.path.join(BASE_DIR, 'build', 'webpack-stats.json'), + "BUNDLE_DIR_NAME": os.path.join("studio", ""), + "STATS_FILE": os.path.join(BASE_DIR, "build", "webpack-stats.json"), } } -PERMISSION_TEMPLATE_ROOT = os.path.join(BASE_DIR, "contentcuration", "templates", "permissions") +PERMISSION_TEMPLATE_ROOT = os.path.join( + BASE_DIR, "contentcuration", "templates", "permissions" +) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = os.getenv("DJANGO_SECRET_KEY") or '_s0k@&o%m6bzg7s(0p(w6z5xbo%vy%mj+xx(w3mhs=f0ve0+h2' +SECRET_KEY = ( + os.getenv("DJANGO_SECRET_KEY") + or "_s0k@&o%m6bzg7s(0p(w6z5xbo%vy%mj+xx(w3mhs=f0ve0+h2" +) # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = True -SESSION_COOKIE_NAME = 'kolibri_studio_sessionid' +SESSION_COOKIE_NAME = "kolibri_studio_sessionid" ALLOWED_HOSTS = ["*"] # In production, we serve through a file socket, so this is OK. 
@@ -66,207 +70,187 @@ # Application definition INSTALLED_APPS = ( - 'contentcuration.apps.ContentConfig', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.admin', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.sites', - 'django.contrib.staticfiles', - 'rest_framework', - 'django_js_reverse', - 'kolibri_content', - 'readonly', - 'le_utils', - 'rest_framework.authtoken', - 'search', - 'django_s3_storage', - 'webpack_loader', - 'django_filters', - 'mathfilters', - 'django.contrib.postgres', - 'django_celery_results', - 'kolibri_public', + "contentcuration.apps.ContentConfig", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.admin", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.sites", + "django.contrib.staticfiles", + "rest_framework", + "django_js_reverse", + "kolibri_content", + "readonly", + "le_utils", + "rest_framework.authtoken", + "search", + "django_s3_storage", + "webpack_loader", + "django_filters", + "django.contrib.postgres", + "django_celery_results", + "kolibri_public", + "automation", ) SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" REDIS_URL = "redis://:{password}@{endpoint}/".format( password=os.getenv("CELERY_REDIS_PASSWORD") or "", - endpoint=os.getenv("CELERY_BROKER_ENDPOINT") or "localhost:6379") + endpoint=os.getenv("CELERY_BROKER_ENDPOINT") or "localhost:6379", +) CACHE_REDIS_DB = os.getenv("CACHE_REDIS_DB") or "1" CACHES = { - 'default': { - 'BACKEND': 'django_redis.cache.RedisCache', - 'LOCATION': '{url}{db}'.format(url=REDIS_URL, db=CACHE_REDIS_DB), - 'OPTIONS': { - 'CLIENT_CLASS': 'django_redis.client.DefaultClient', - } + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "{url}{db}".format(url=REDIS_URL, db=CACHE_REDIS_DB), + "OPTIONS": { + "CLIENT_CLASS": "django_redis.client.DefaultClient", + }, } } # READ-ONLY SETTINGS # Set STUDIO_INCIDENT_TYPE to a key from contentcuration.utils.incidents to activate -INCIDENT_TYPE = os.getenv('STUDIO_INCIDENT_TYPE') +INCIDENT_TYPE = os.getenv("STUDIO_INCIDENT_TYPE") INCIDENT = INCIDENTS.get(INCIDENT_TYPE) -SITE_READ_ONLY = INCIDENT and INCIDENT['readonly'] +SITE_READ_ONLY = INCIDENT and INCIDENT["readonly"] # If Studio is in readonly mode, it will throw a DatabaseWriteError # Use a local cache to bypass the readonly property if SITE_READ_ONLY: - CACHES['default']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache' - CACHES['default']['LOCATION'] = 'readonly_cache' + CACHES["default"]["BACKEND"] = "django.core.cache.backends.locmem.LocMemCache" + CACHES["default"]["LOCATION"] = "readonly_cache" MIDDLEWARE = ( # 'django.middleware.cache.UpdateCacheMiddleware', - 'contentcuration.middleware.session.KolibriStudioSessionMiddleware', - 'contentcuration.middleware.locale.KolibriStudioLocaleMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.common.BrokenLinkEmailsMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.middleware.http.ConditionalGetMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', - 'django.middleware.security.SecurityMiddleware', - 'contentcuration.middleware.db_readonly.DatabaseReadOnlyMiddleware', + "contentcuration.middleware.session.KolibriStudioSessionMiddleware", + "contentcuration.middleware.locale.KolibriStudioLocaleMiddleware", + 
"django.middleware.common.CommonMiddleware", + "django.middleware.common.BrokenLinkEmailsMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.middleware.http.ConditionalGetMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", + "django.middleware.security.SecurityMiddleware", + "contentcuration.middleware.db_readonly.DatabaseReadOnlyMiddleware", # 'django.middleware.cache.FetchFromCacheMiddleware', ) -if os.getenv("PROFILE_STUDIO_FULL"): - MIDDLEWARE = MIDDLEWARE + ("pyinstrument.middleware.ProfilerMiddleware",) - PYINSTRUMENT_PROFILE_DIR = os.getenv("PROFILE_DIR") or "{}/profile".format( - gettempdir() - ) -elif os.getenv("PROFILE_STUDIO_FILTER"): - MIDDLEWARE = MIDDLEWARE + ("customizable_django_profiler.cProfileMiddleware",) - PROFILER = { - "activate": True, - "output": ["dump", "console"], - "count": "10", - "file_location": os.getenv("PROFILE_DIR") - or "{}/profile/studio".format(gettempdir()), - "trigger": "query_param:{}".format(os.getenv("PROFILE_STUDIO_FILTER")), - } - if os.getenv("GCLOUD_ERROR_REPORTING"): MIDDLEWARE = ( "contentcuration.middleware.error_reporting.ErrorReportingMiddleware", ) + MIDDLEWARE SUPPORTED_BROWSERS = [ - 'Chrome', - 'Firefox', - 'Safari', + "Chrome", + "Firefox", + "Safari", ] -HEALTH_CHECK_BROWSERS = [ - 'kube-probe', - 'GoogleHC', - 'Studio-Internal-Prober' -] +HEALTH_CHECK_BROWSERS = ["kube-probe", "GoogleHC", "Studio-Internal-Prober"] REST_FRAMEWORK = { - 'DEFAULT_PERMISSION_CLASSES': ( - 'rest_framework.permissions.IsAuthenticated', - ), - 'DEFAULT_AUTHENTICATION_CLASSES': ( - 'rest_framework.authentication.SessionAuthentication', + "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",), + "DEFAULT_AUTHENTICATION_CLASSES": ( + "rest_framework.authentication.SessionAuthentication", # 'rest_framework.authentication.BasicAuthentication', - 'rest_framework.authentication.TokenAuthentication', - ) + "rest_framework.authentication.TokenAuthentication", + ), } -ROOT_URLCONF = 'contentcuration.urls' +ROOT_URLCONF = "contentcuration.urls" TEMPLATES = [ { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': ['/templates/'], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', - 'readonly.context_processors.readonly', - 'contentcuration.context_processors.site_variables', - 'contentcuration.context_processors.url_tag', + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": ["/templates/"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + "readonly.context_processors.readonly", + "contentcuration.context_processors.site_variables", + "contentcuration.context_processors.url_tag", ], }, }, ] -WSGI_APPLICATION = 'contentcuration.wsgi.application' +WSGI_APPLICATION = "contentcuration.wsgi.application" # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.postgresql_psycopg2', - 'NAME': os.getenv("DATA_DB_NAME") or 'kolibri-studio', + "default": { + 
"ENGINE": "django.db.backends.postgresql_psycopg2", + "NAME": os.getenv("DATA_DB_NAME") or "kolibri-studio", # For dev purposes only - 'USER': os.getenv('DATA_DB_USER') or 'learningequality', - 'PASSWORD': os.getenv('DATA_DB_PASS') or 'kolibri', - 'HOST': os.getenv('DATA_DB_HOST') or 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. - 'PORT': '', # Set to empty string for default. + "USER": os.getenv("DATA_DB_USER") or "learningequality", + "PASSWORD": os.getenv("DATA_DB_PASS") or "kolibri", + "HOST": os.getenv("DATA_DB_HOST") + or "localhost", # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. + "PORT": "", # Set to empty string for default. }, } -IS_CONTENTNODE_TABLE_PARTITIONED = os.getenv("IS_CONTENTNODE_TABLE_PARTITIONED") or False +IS_CONTENTNODE_TABLE_PARTITIONED = ( + os.getenv("IS_CONTENTNODE_TABLE_PARTITIONED") or False +) DATABASE_ROUTERS = [ "kolibri_content.router.ContentDBRouter", ] LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'handlers': { - 'file': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filename': os.getenv('DJANGO_LOG_FILE') or 'django.log' + "version": 1, + "disable_existing_loggers": False, + "handlers": { + "file": { + "level": "DEBUG", + "class": "logging.FileHandler", + "filename": os.getenv("DJANGO_LOG_FILE") or "django.log", }, - 'console': { - 'class': 'logging.StreamHandler', + "console": { + "class": "logging.StreamHandler", }, - 'null': { - 'class': 'logging.NullHandler' - } + "null": {"class": "logging.NullHandler"}, }, - 'loggers': { - 'command': { - 'handlers': ['console'], - 'level': 'DEBUG' if globals().get('DEBUG') else 'INFO', - 'propagate': True, + "loggers": { + "command": { + "handlers": ["console"], + "level": "DEBUG" if globals().get("DEBUG") else "INFO", + "propagate": True, }, - 'django': { - 'handlers': ['file', 'console'], - 'level': 'DEBUG' if globals().get('DEBUG') else 'INFO', - 'propagate': True, + "django": { + "handlers": ["file", "console"], + "level": "DEBUG" if globals().get("DEBUG") else "INFO", + "propagate": True, }, - 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - 'level': 'DEBUG' - } - } + "django.db.backends": { + "handlers": ["null"], + "propagate": False, + "level": "DEBUG", + }, + }, } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ -LANGUAGE_CODE = 'en' +LANGUAGE_CODE = "en" -TIME_ZONE = 'UTC' +TIME_ZONE = "UTC" USE_I18N = True @@ -274,9 +258,7 @@ USE_TZ = True -LOCALE_PATHS = ( - os.path.join(BASE_DIR, 'locale'), -) +LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),) def gettext(s): @@ -284,40 +266,40 @@ def gettext(s): LANGUAGES = ( - ('en', gettext('English')), - ('es-es', gettext('Spanish')), - ('ar', gettext('Arabic')), - ('fr-fr', gettext('French')), - ('pt-br', gettext('Portuguese')), + ("en", gettext("English")), + ("es-es", gettext("Spanish")), + ("ar", gettext("Arabic")), + ("fr-fr", gettext("French")), + ("pt-br", gettext("Portuguese")), # ('en-PT', gettext('English - Pirate')), ) PRODUCTION_SITE_ID = 1 SITE_BY_ID = { - 'master': PRODUCTION_SITE_ID, - 'unstable': 3, - 'hotfixes': 4, + "master": PRODUCTION_SITE_ID, + "unstable": 3, + "hotfixes": 4, } # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ -STATIC_URL = '/static/' +STATIC_URL = "/static/" -STORAGE_URL = '/content/storage/' +STORAGE_URL = "/content/storage/" -CONTENT_DATABASE_URL = '/content/databases/' +CONTENT_DATABASE_URL = 
"/content/databases/" -CSV_URL = '/content/csvs/' +CSV_URL = "/content/csvs/" -LOGIN_REDIRECT_URL = '/channels/' -LOGIN_URL = '/accounts/' +LOGIN_REDIRECT_URL = "/channels/" +LOGIN_URL = "/accounts/" -AUTH_USER_MODEL = 'contentcuration.User' +AUTH_USER_MODEL = "contentcuration.User" ACCOUNT_ACTIVATION_DAYS = 7 REGISTRATION_OPEN = True -SITE_ID = SITE_BY_ID.get(os.getenv('BRANCH_ENVIRONMENT'), 1) +SITE_ID = SITE_BY_ID.get(os.getenv("BRANCH_ENVIRONMENT"), 1) # Used for serializing datetime objects. DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" @@ -326,11 +308,11 @@ def gettext(s): SEND_USER_ACTIVATION_NOTIFICATION_EMAIL = bool( os.getenv("SEND_USER_ACTIVATION_NOTIFICATION_EMAIL") ) -SPACE_REQUEST_EMAIL = 'content@learningequality.org' -REGISTRATION_INFORMATION_EMAIL = 'studio-registrations@learningequality.org' -HELP_EMAIL = 'content@learningequality.org' -DEFAULT_FROM_EMAIL = 'Kolibri Studio ' -POLICY_EMAIL = 'legal@learningequality.org' +SPACE_REQUEST_EMAIL = "content@learningequality.org" +REGISTRATION_INFORMATION_EMAIL = "studio-registrations@learningequality.org" +HELP_EMAIL = "content@learningequality.org" +DEFAULT_FROM_EMAIL = "Kolibri Studio " +POLICY_EMAIL = "legal@learningequality.org" # Used to determine how many days a user # has to undo accidentally deleting account. @@ -338,33 +320,30 @@ def gettext(s): DEFAULT_LICENSE = 1 -SERVER_EMAIL = 'curation-errors@learningequality.org' -ADMINS = [('Errors', SERVER_EMAIL)] +SERVER_EMAIL = "curation-errors@learningequality.org" +ADMINS = [("Errors", SERVER_EMAIL)] DEFAULT_TITLE = "Kolibri Studio" IGNORABLE_404_URLS = [ - re.compile(r'\.(php|cgi)$'), - re.compile(r'^/phpmyadmin/'), - re.compile(r'^/apple-touch-icon.*\.png$'), - re.compile(r'^/favicon\.ico$'), - re.compile(r'^/robots\.txt$'), + re.compile(r"\.(php|cgi)$"), + re.compile(r"^/phpmyadmin/"), + re.compile(r"^/apple-touch-icon.*\.png$"), + re.compile(r"^/favicon\.ico$"), + re.compile(r"^/robots\.txt$"), ] # CELERY CONFIGURATIONS CELERY_REDIS_DB = os.getenv("CELERY_REDIS_DB") or "0" CELERY = { - "broker_url": "{url}{db}".format( - url=REDIS_URL, - db=CELERY_REDIS_DB - ), + "broker_url": "{url}{db}".format(url=REDIS_URL, db=CELERY_REDIS_DB), # with a redis broker, tasks will be re-sent if not completed within the duration of this timeout "broker_transport_options": {"visibility_timeout": 4 * 3600}, "redis_db": CELERY_REDIS_DB, "result_backend": "django-db", "redis_backend_health_check_interval": 600, - "timezone": os.getenv("CELERY_TIMEZONE") or 'Africa/Nairobi', - "accept_content": ['application/json'], + "timezone": os.getenv("CELERY_TIMEZONE") or "Africa/Nairobi", + "accept_content": ["application/json"], "task_serializer": "json", "result_serializer": "json", "result_extended": True, @@ -378,11 +357,11 @@ def gettext(s): ORPHAN_DATE_CLEAN_UP_THRESHOLD = TWO_WEEKS_AGO # CLOUD STORAGE SETTINGS -DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage' -AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID') or 'development' -AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY') or 'development' -AWS_S3_BUCKET_NAME = os.getenv('AWS_BUCKET_NAME') or 'content' -AWS_S3_ENDPOINT_URL = os.getenv('AWS_S3_ENDPOINT_URL') or 'http://localhost:9000' +DEFAULT_FILE_STORAGE = "django_s3_storage.storage.S3Storage" +AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID") or "development" +AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY") or "development" +AWS_S3_BUCKET_NAME = os.getenv("AWS_BUCKET_NAME") or "content" +AWS_S3_ENDPOINT_URL = os.getenv("AWS_S3_ENDPOINT_URL") or 
"http://localhost:9000" AWS_AUTO_CREATE_BUCKET = False AWS_S3_FILE_OVERWRITE = True AWS_S3_BUCKET_AUTH = False @@ -391,7 +370,9 @@ def gettext(s): # defaults to what's inferred from the environment. See # https://cloud.google.com/docs/authentication/production # for how these credentials are inferred automatically. -GCS_STORAGE_SERVICE_ACCOUNT_KEY_PATH = os.getenv("GOOGLE_CLOUD_STORAGE_SERVICE_ACCOUNT_CREDENTIALS") +GCS_STORAGE_SERVICE_ACCOUNT_KEY_PATH = os.getenv( + "GOOGLE_CLOUD_STORAGE_SERVICE_ACCOUNT_CREDENTIALS" +) # GOOGLE DRIVE SETTINGS GOOGLE_AUTH_JSON = "credentials/client_secret.json" @@ -418,13 +399,14 @@ def gettext(s): if key: key = key.strip() # strip any possible whitespace or trailing newline -SENTRY_DSN = 'https://{secret}@sentry.io/1252819'.format(secret=key) if key else None +SENTRY_DSN = "https://{secret}@sentry.io/1252819".format(secret=key) if key else None SENTRY_ENVIRONMENT = get_secret("BRANCH_ENVIRONMENT") SENTRY_RELEASE = os.environ.get("RELEASE_COMMIT_SHA") SENTRY_ACTIVE = False if SENTRY_DSN and SENTRY_RELEASE and SENTRY_ENVIRONMENT: import sentry_sdk + # TODO: there are also Celery and Redis integrations, but since they are new # I left them as a separate task so we can spend more time on testing. from sentry_sdk.integrations.django import DjangoIntegration @@ -443,3 +425,6 @@ def gettext(s): DEFAULT_AUTO_FIELD = "django.db.models.AutoField" LANGUAGE_COOKIE_AGE = 3600 * 24 * 14 + +# Curriculum Automation Settings +CURRICULUM_AUTOMATION_API_URL = os.getenv("CURRICULUM_AUTOMATION_API_URL") diff --git a/contentcuration/contentcuration/signals.py b/contentcuration/contentcuration/signals.py index e96446569f..c724565306 100644 --- a/contentcuration/contentcuration/signals.py +++ b/contentcuration/contentcuration/signals.py @@ -10,7 +10,7 @@ def set_jit(sender, connection, **kwargs): optimize its use. https://www.postgresql.org/docs/12/runtime-config-query.html#GUC-JIT """ - if connection.vendor == 'postgresql': + if connection.vendor == "postgresql": db_features = DatabaseFeatures(connection) # JIT is new in v11, and for reference this returns True for v11 and following if db_features.is_postgresql_11: diff --git a/contentcuration/contentcuration/static/feature_flags.json b/contentcuration/contentcuration/static/feature_flags.json index c5567d2451..93653fae13 100644 --- a/contentcuration/contentcuration/static/feature_flags.json +++ b/contentcuration/contentcuration/static/feature_flags.json @@ -13,6 +13,11 @@ "type": "boolean", "title":"Test AI feature", "description": "Allow user access to AI features" + }, + "survey":{ + "type": "boolean", + "title":"Test Survey feature", + "description": "Allow user access to Survey" } }, "examples": [ diff --git a/contentcuration/contentcuration/tasks.py b/contentcuration/contentcuration/tasks.py index 39f89805ce..129cd78302 100644 --- a/contentcuration/contentcuration/tasks.py +++ b/contentcuration/contentcuration/tasks.py @@ -3,9 +3,6 @@ `contentcuration.utils.celery.tasks.CeleryTask`. See the methods of that class for enqueuing and fetching results of the tasks. 
""" -from __future__ import absolute_import -from __future__ import unicode_literals - import logging import time @@ -35,7 +32,10 @@ def apply_user_changes_task(self, user_id): :param user_id: The user ID for which to process changes """ from contentcuration.viewsets.sync.base import apply_changes - changes_qs = Change.objects.filter(applied=False, errored=False, user_id=user_id, channel__isnull=True) + + changes_qs = Change.objects.filter( + applied=False, errored=False, user_id=user_id, channel__isnull=True + ) apply_changes(changes_qs) if changes_qs.exists(): self.requeue() @@ -48,7 +48,10 @@ def apply_channel_changes_task(self, channel_id): :param channel_id: The channel ID for which to process changes """ from contentcuration.viewsets.sync.base import apply_changes - changes_qs = Change.objects.filter(applied=False, errored=False, channel_id=channel_id) + + changes_qs = Change.objects.filter( + applied=False, errored=False, channel_id=channel_id + ) apply_changes(changes_qs) if changes_qs.exists(): self.requeue() @@ -56,13 +59,14 @@ def apply_channel_changes_task(self, channel_id): class CustomEmailMessage(EmailMessage): """ - jayoshih: There's an issue with the django postmark backend where - _build_message attempts to attach files as base64. However, - the django EmailMessage attach method makes all content with a text/* - mimetype to be encoded as a string, causing `base64.b64encode(content)` - to fail. This is a workaround to ensure that content is still encoded as - bytes when it comes to encoding the attachment as base64 + jayoshih: There's an issue with the django postmark backend where + _build_message attempts to attach files as base64. However, + the django EmailMessage attach method makes all content with a text/* + mimetype to be encoded as a string, causing `base64.b64encode(content)` + to fail. 
This is a workaround to ensure that content is still encoded as + bytes when it comes to encoding the attachment as base64 """ + def attach(self, filename=None, content=None, mimetype=None): if filename is None: raise AssertionError @@ -79,6 +83,7 @@ def generateusercsv_task(user_id, language=settings.LANGUAGE_CODE): user = User.objects.get(pk=user_id) csv_path = write_user_csv(user) subject = render_to_string("export/user_csv_email_subject.txt", {}) + subject = "".join(subject.splitlines()) message = render_to_string( "export/user_csv_email.txt", { @@ -89,8 +94,10 @@ def generateusercsv_task(user_id, language=settings.LANGUAGE_CODE): }, ) - email = CustomEmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email]) - email.encoding = 'utf-8' + email = CustomEmailMessage( + subject, message, settings.DEFAULT_FROM_EMAIL, [user.email] + ) + email.encoding = "utf-8" email.attach_file(csv_path, mimetype="text/csv") email.send() @@ -118,7 +125,11 @@ def calculate_user_storage_task(user_id): user = User.objects.get(pk=user_id) user.set_space_used() except User.DoesNotExist: - logging.error("Tried to calculate user storage for user with id {} but they do not exist".format(user_id)) + logging.error( + "Tried to calculate user storage for user with id {} but they do not exist".format( + user_id + ) + ) @app.task(name="calculate_resource_size_task") @@ -130,10 +141,21 @@ def calculate_resource_size_task(node_id, channel_id): @app.task(name="sendcustomemails_task") def sendcustomemails_task(subject, message, query): - subject = render_to_string('registration/custom_email_subject.txt', {'subject': subject}) + subject = render_to_string( + "registration/custom_email_subject.txt", {"subject": subject} + ) + subject = "".join(subject.splitlines()) recipients = AdminUserFilter(data=query).qs.distinct() for recipient in recipients: - text = message.format(current_date=time.strftime("%A, %B %d"), current_time=time.strftime("%H:%M %Z"), **recipient.__dict__) - text = render_to_string('registration/custom_email.txt', {'message': text}) - recipient.email_user(subject, text, settings.DEFAULT_FROM_EMAIL, ) + text = message.format( + current_date=time.strftime("%A, %B %d"), + current_time=time.strftime("%H:%M %Z"), + **recipient.__dict__ + ) + text = render_to_string("registration/custom_email.txt", {"message": text}) + recipient.email_user( + subject, + text, + settings.DEFAULT_FROM_EMAIL, + ) diff --git a/contentcuration/contentcuration/templates/base.html b/contentcuration/contentcuration/templates/base.html index 8137cc54d5..82108ac00e 100644 --- a/contentcuration/contentcuration/templates/base.html +++ b/contentcuration/contentcuration/templates/base.html @@ -122,11 +122,11 @@ {% if INCIDENT %}
-        error {{INCIDENT.message | safe}}
+        {{INCIDENT.message | safe}}
{% elif DEPRECATED %}
-        error {% blocktrans %}Contentworkshop.learningequality.org has been deprecated. Please go to studio.learningequality.org for the latest version of Studio{% endblocktrans %}
+        {% blocktrans %}Contentworkshop.learningequality.org has been deprecated. Please go to studio.learningequality.org for the latest version of Studio{% endblocktrans %}
 {% endif %}
 {% endblock nav %}
diff --git a/contentcuration/contentcuration/templates/contentcuration/editor_dev.html b/contentcuration/contentcuration/templates/contentcuration/editor_dev.html
new file mode 100644
index 0000000000..d41b1410e6
--- /dev/null
+++ b/contentcuration/contentcuration/templates/contentcuration/editor_dev.html
@@ -0,0 +1,8 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+ + + +{% endblock %} diff --git a/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt b/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt index 7304527c22..4c20de29f0 100644 --- a/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt +++ b/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt @@ -1 +1 @@ -{% load i18n %}{% if share_mode == 'edit' %}{% blocktrans with channel=channel %}You've been invited to edit {{ channel }}{% endblocktrans %}{% else %}{% blocktrans with channel=channel %}You've been invited to view {{ channel }}{% endblocktrans %}{% endif %} \ No newline at end of file +{% load i18n %}{% if share_mode == 'edit' %}{% blocktrans with channel=channel %}You've been invited to edit {{ channel }}{% endblocktrans %}{% else %}{% blocktrans with channel=channel %}You've been invited to view {{ channel }}{% endblocktrans %}{% endif %} diff --git a/contentcuration/contentcuration/templates/perseus/exercise.json b/contentcuration/contentcuration/templates/perseus/exercise.json index 44e287e571..4408a80bb1 100644 --- a/contentcuration/contentcuration/templates/perseus/exercise.json +++ b/contentcuration/contentcuration/templates/perseus/exercise.json @@ -1 +1 @@ -{{exercise | safe}} \ No newline at end of file +{{exercise | safe}} diff --git a/contentcuration/contentcuration/templates/registration/custom_email_subject.txt b/contentcuration/contentcuration/templates/registration/custom_email_subject.txt index ad591957af..82fd21ed2a 100644 --- a/contentcuration/contentcuration/templates/registration/custom_email_subject.txt +++ b/contentcuration/contentcuration/templates/registration/custom_email_subject.txt @@ -1 +1 @@ -{% load i18n %} {{ subject }} \ No newline at end of file +{% load i18n %} {{ subject }} diff --git a/contentcuration/contentcuration/templates/registration/registration_information_email.txt b/contentcuration/contentcuration/templates/registration/registration_information_email.txt index 314ec38ae2..b165e8a93e 100644 --- a/contentcuration/contentcuration/templates/registration/registration_information_email.txt +++ b/contentcuration/contentcuration/templates/registration/registration_information_email.txt @@ -11,4 +11,4 @@ Storage Needed: {{information.space_needed}}{% endif %} Location(s): {{information.locations | join:", "}} Heard about us from: {{information.heard_from}} -{% endautoescape %} \ No newline at end of file +{% endautoescape %} diff --git a/contentcuration/contentcuration/templatetags/export_tags.py b/contentcuration/contentcuration/templatetags/export_tags.py index 4d82025abc..4003c4606a 100644 --- a/contentcuration/contentcuration/templatetags/export_tags.py +++ b/contentcuration/contentcuration/templatetags/export_tags.py @@ -12,20 +12,22 @@ THUMBNAIL_DIMENSION = 200 # PDFs where encoding returns None will fail, so use this in case images aren't found -DEFAULT_ENCODING = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" \ - "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" \ - "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" \ - "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" \ - "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" \ - "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" \ - "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" \ - 
"+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" \ - "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" \ - "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" \ - "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" \ - "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" \ - "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" \ - "/aXwDY2vpQfdHLrIAAAAASUVORK5CYII=" +DEFAULT_ENCODING = ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" + "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" + "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" + "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" + "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" + "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" + "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" + "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" + "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" + "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" + "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" + "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" + "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" + "/aXwDY2vpQfdHLrIAAAAASUVORK5CYII=" +) register = template.Library() logmodule.basicConfig() @@ -42,11 +44,15 @@ def encode_base64(value, dimension=THUMBNAIL_DIMENSION): return get_thumbnail_encoding(value, dimension=dimension) except IOError: try: - filepath = os.path.join(settings.STATIC_ROOT, 'img', 'kolibri_placeholder.png') + filepath = os.path.join( + settings.STATIC_ROOT, "img", "kolibri_placeholder.png" + ) - with open(filepath, 'rb') as image_file: + with open(filepath, "rb") as image_file: _, ext = os.path.splitext(value) - return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(image_file.read())) + return "data:image/{};base64,{}".format( + ext[1:], base64.b64encode(image_file.read()) + ) except IOError: logging.warning("Could not find {}".format(value)) return DEFAULT_ENCODING @@ -59,13 +65,15 @@ def encode_static_base64(value, dimension=None): if value.startswith(settings.STATIC_URL): value = os.path.basename(value) - filepath = os.path.join(settings.STATIC_ROOT, 'img', value) + filepath = os.path.join(settings.STATIC_ROOT, "img", value) if dimension: return get_thumbnail_encoding(filepath, dimension=int(dimension)) - with open(filepath, 'rb') as image_file: + with open(filepath, "rb") as image_file: _, ext = os.path.splitext(value) - return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(image_file.read())) + return "data:image/{};base64,{}".format( + ext[1:], base64.b64encode(image_file.read()) + ) except IOError: logging.warning("Could not find {}".format(value)) return DEFAULT_ENCODING diff --git a/contentcuration/contentcuration/templatetags/license_tags.py b/contentcuration/contentcuration/templatetags/license_tags.py index dd68ca07fa..89331cf141 100644 --- a/contentcuration/contentcuration/templatetags/license_tags.py +++ b/contentcuration/contentcuration/templatetags/license_tags.py @@ -8,47 +8,66 @@ LICENSE_MAPPING = None -DESCRIPTION_MAPPING = {"CC BY": _("The Attribution License lets others distribute, " - "remix, 
tweak, and build upon your work, even commercially, " - "as long as they credit you for the original creation. This " - "is the most accommodating of licenses offered. Recommended " - "for maximum dissemination and use of licensed materials."), - "CC BY-SA": _("The Attribution-ShareAlike License lets others remix," - " tweak, and build upon your work even for commercial purposes," - " as long as they credit you and license their new creations " - "under the identical terms. This license is often compared to " - "\"copyleft\" free and open source software licenses. All new " - "works based on yours will carry the same license, so any " - "derivatives will also allow commercial use. This is the " - "license used by Wikipedia, and is recommended for materials " - "that would benefit from incorporating content from Wikipedia " - "and similarly licensed projects."), - "CC BY-ND": _("The Attribution-NoDerivs License allows for redistribution," - " commercial and non-commercial, as long as it is passed along " - "unchanged and in whole, with credit to you."), - "CC BY-NC": _("The Attribution-NonCommercial License lets others remix, " - "tweak, and build upon your work non-commercially, and although " - "their new works must also acknowledge you and be non-commercial, " - "they don't have to license their derivative works on the same terms."), - "CC BY-NC-SA": _("The Attribution-NonCommercial-ShareAlike License lets " - "others remix, tweak, and build upon your work non-commercially, " - "as long as they credit you and license their new creations under " - "the identical terms."), - "CC BY-NC-ND": _("The Attribution-NonCommercial-NoDerivs License is the " - "most restrictive of our six main licenses, only allowing others " - "to download your works and share them with others as long as they " - "credit you, but they can't change them in any way or use them commercially."), - "All Rights Reserved": _("The All Rights Reserved License indicates that " - "the copyright holder reserves, or holds for their own use, all " - "the rights provided by copyright law under one specific copyright treaty."), - "Public Domain": _("Public Domain work has been identified as being free " - "of known restrictions under copyright law, including all related " - "and neighboring rights."), - "Special Permissions": _("Special Permissions is a custom license to use" - " when the current licenses do not apply to the content. The " - "owner of this license is responsible for creating a description " - "of what this license entails."), - } +DESCRIPTION_MAPPING = { + "CC BY": _( + "The Attribution License lets others distribute, " + "remix, tweak, and build upon your work, even commercially, " + "as long as they credit you for the original creation. This " + "is the most accommodating of licenses offered. Recommended " + "for maximum dissemination and use of licensed materials." + ), + "CC BY-SA": _( + "The Attribution-ShareAlike License lets others remix," + " tweak, and build upon your work even for commercial purposes," + " as long as they credit you and license their new creations " + "under the identical terms. This license is often compared to " + '"copyleft" free and open source software licenses. All new ' + "works based on yours will carry the same license, so any " + "derivatives will also allow commercial use. This is the " + "license used by Wikipedia, and is recommended for materials " + "that would benefit from incorporating content from Wikipedia " + "and similarly licensed projects." 
+ ), + "CC BY-ND": _( + "The Attribution-NoDerivs License allows for redistribution," + " commercial and non-commercial, as long as it is passed along " + "unchanged and in whole, with credit to you." + ), + "CC BY-NC": _( + "The Attribution-NonCommercial License lets others remix, " + "tweak, and build upon your work non-commercially, and although " + "their new works must also acknowledge you and be non-commercial, " + "they don't have to license their derivative works on the same terms." + ), + "CC BY-NC-SA": _( + "The Attribution-NonCommercial-ShareAlike License lets " + "others remix, tweak, and build upon your work non-commercially, " + "as long as they credit you and license their new creations under " + "the identical terms." + ), + "CC BY-NC-ND": _( + "The Attribution-NonCommercial-NoDerivs License is the " + "most restrictive of our six main licenses, only allowing others " + "to download your works and share them with others as long as they " + "credit you, but they can't change them in any way or use them commercially." + ), + "All Rights Reserved": _( + "The All Rights Reserved License indicates that " + "the copyright holder reserves, or holds for their own use, all " + "the rights provided by copyright law under one specific copyright treaty." + ), + "Public Domain": _( + "Public Domain work has been identified as being free " + "of known restrictions under copyright law, including all related " + "and neighboring rights." + ), + "Special Permissions": _( + "Special Permissions is a custom license to use" + " when the current licenses do not apply to the content. The " + "owner of this license is responsible for creating a description " + "of what this license entails." + ), +} @register.filter(is_safe=True) @@ -56,7 +75,9 @@ def get_license_url(value): global LICENSE_MAPPING if not LICENSE_MAPPING: - LICENSE_MAPPING = {lic.license_name: lic.license_url for lic in License.objects.all()} + LICENSE_MAPPING = { + lic.license_name: lic.license_url for lic in License.objects.all() + } return LICENSE_MAPPING.get(value) @@ -64,4 +85,7 @@ def get_license_url(value): @register.filter(is_safe=True) @stringfilter def get_license_description(value): - return DESCRIPTION_MAPPING.get(value) or License.objects.get(license_name=value).description + return ( + DESCRIPTION_MAPPING.get(value) + or License.objects.get(license_name=value).description + ) diff --git a/contentcuration/contentcuration/templatetags/perseus_tags.py b/contentcuration/contentcuration/templatetags/perseus_tags.py index 3f3c223aec..24cea74df3 100644 --- a/contentcuration/contentcuration/templatetags/perseus_tags.py +++ b/contentcuration/contentcuration/templatetags/perseus_tags.py @@ -1,4 +1,5 @@ import json + from django import template from django.template.defaultfilters import stringfilter diff --git a/contentcuration/contentcuration/templatetags/translation_tags.py b/contentcuration/contentcuration/templatetags/translation_tags.py index c6a71df2e2..5a1fc51b2c 100644 --- a/contentcuration/contentcuration/templatetags/translation_tags.py +++ b/contentcuration/contentcuration/templatetags/translation_tags.py @@ -9,22 +9,24 @@ @register.simple_tag -def render_bundle_css(bundle_name, config='DEFAULT', attrs=''): +def render_bundle_css(bundle_name, config="DEFAULT", attrs=""): """ A tag to conditionally load css depending on whether the page is being rendered for an LTR or RTL language. Using webpack-rtl-plugin, we now have two css files for every bundle. 
One that just ends in .css for LTR, and the other that ends in .rtl.css for RTL. This will conditionally load the correct one depending on the current language setting. """ - bidi = get_language_info(get_language())['bidi'] - files = utils.get_files(bundle_name, extension='css', config=config) + bidi = get_language_info(get_language())["bidi"] + files = utils.get_files(bundle_name, extension="css", config=config) if bidi: - files = [x for x in files if x['name'].endswith('rtl.css')] + files = [x for x in files if x["name"].endswith("rtl.css")] else: - files = [x for x in files if not x['name'].endswith('rtl.css')] + files = [x for x in files if not x["name"].endswith("rtl.css")] tags = [] for chunk in files: - tags.append(( - '' - ).format(chunk['url'], attrs)) - return mark_safe('\n'.join(tags)) + tags.append( + ('').format( + chunk["url"], attrs + ) + ) + return mark_safe("\n".join(tags)) diff --git a/contentcuration/contentcuration/tests/base.py b/contentcuration/contentcuration/tests/base.py index ad110c7302..1189e0e6f9 100644 --- a/contentcuration/contentcuration/tests/base.py +++ b/contentcuration/contentcuration/tests/base.py @@ -1,7 +1,3 @@ -from __future__ import absolute_import - -from builtins import str - from django.conf import settings from django.core.files.uploadedfile import SimpleUploadedFile from django.core.management import call_command @@ -72,6 +68,9 @@ class StudioAPITestCase(APITestCase): def setUpClass(cls): super(StudioAPITestCase, cls).setUpClass() call_command("loadconstants") + cls.admin_user = User.objects.create_superuser( + "big_shot", "bigshot@reallybigcompany.com", "password" + ) def sign_in(self, user=None): if not user: diff --git a/contentcuration/contentcuration/tests/db/test_advisory_lock.py b/contentcuration/contentcuration/tests/db/test_advisory_lock.py index 63a3650b46..14009f8392 100644 --- a/contentcuration/contentcuration/tests/db/test_advisory_lock.py +++ b/contentcuration/contentcuration/tests/db/test_advisory_lock.py @@ -8,7 +8,9 @@ from django.db import transaction from django.test.testcases import SimpleTestCase -from django_concurrent_tests.management.commands.concurrent_call_wrapper import use_test_databases +from django_concurrent_tests.management.commands.concurrent_call_wrapper import ( + use_test_databases, +) from mock import mock from mock import patch from pytest import mark @@ -25,35 +27,197 @@ # flake8: noqa -@mark.parametrize("key1, key2, unlock, session, shared, wait, expected_query", [ - # transaction level - (1, None, False, False, False, True, "SELECT pg_advisory_xact_lock(%s) AS lock;"), - (3, None, False, False, True, True, "SELECT pg_advisory_xact_lock_shared(%s) AS lock;"), - (4, None, False, False, True, False, "SELECT pg_try_advisory_xact_lock_shared(%s) AS lock;"), - (5, None, False, False, False, False, "SELECT pg_try_advisory_xact_lock(%s) AS lock;"), - (6, 1, False, False, False, True, "SELECT pg_advisory_xact_lock(%s, %s) AS lock;"), - (7, 2, False, False, True, True, "SELECT pg_advisory_xact_lock_shared(%s, %s) AS lock;"), - (8, 3, False, False, True, False, "SELECT pg_try_advisory_xact_lock_shared(%s, %s) AS lock;"), - (9, 4, False, False, False, False, "SELECT pg_try_advisory_xact_lock(%s, %s) AS lock;"), - - # session level - (10, None, False, True, False, True, "SELECT pg_advisory_lock(%s) AS lock;"), - (11, None, True, True, False, True, "SELECT pg_advisory_unlock(%s) AS lock;"), - (12, None, False, True, True, True, "SELECT pg_advisory_lock_shared(%s) AS lock;"), - (13, None, True, True, True, True, "SELECT 
pg_advisory_unlock_shared(%s) AS lock;"), - (14, None, False, True, False, False, "SELECT pg_try_advisory_lock(%s) AS lock;"), - (15, None, True, True, False, False, "SELECT pg_try_advisory_unlock(%s) AS lock;"), - (16, None, False, True, True, False, "SELECT pg_try_advisory_lock_shared(%s) AS lock;"), - (17, None, True, True, True, False, "SELECT pg_try_advisory_unlock_shared(%s) AS lock;"), - (18, 1, False, True, False, True, "SELECT pg_advisory_lock(%s, %s) AS lock;"), - (19, 2, True, True, False, True, "SELECT pg_advisory_unlock(%s, %s) AS lock;"), - (20, 3, False, True, True, True, "SELECT pg_advisory_lock_shared(%s, %s) AS lock;"), - (21, 4, True, True, True, True, "SELECT pg_advisory_unlock_shared(%s, %s) AS lock;"), - (22, 5, False, True, False, False, "SELECT pg_try_advisory_lock(%s, %s) AS lock;"), - (23, 6, True, True, False, False, "SELECT pg_try_advisory_unlock(%s, %s) AS lock;"), - (24, 7, False, True, True, False, "SELECT pg_try_advisory_lock_shared(%s, %s) AS lock;"), - (25, 8, True, True, True, False, "SELECT pg_try_advisory_unlock_shared(%s, %s) AS lock;"), -]) +@mark.parametrize( + "key1, key2, unlock, session, shared, wait, expected_query", + [ + # transaction level + ( + 1, + None, + False, + False, + False, + True, + "SELECT pg_advisory_xact_lock(%s) AS lock;", + ), + ( + 3, + None, + False, + False, + True, + True, + "SELECT pg_advisory_xact_lock_shared(%s) AS lock;", + ), + ( + 4, + None, + False, + False, + True, + False, + "SELECT pg_try_advisory_xact_lock_shared(%s) AS lock;", + ), + ( + 5, + None, + False, + False, + False, + False, + "SELECT pg_try_advisory_xact_lock(%s) AS lock;", + ), + ( + 6, + 1, + False, + False, + False, + True, + "SELECT pg_advisory_xact_lock(%s, %s) AS lock;", + ), + ( + 7, + 2, + False, + False, + True, + True, + "SELECT pg_advisory_xact_lock_shared(%s, %s) AS lock;", + ), + ( + 8, + 3, + False, + False, + True, + False, + "SELECT pg_try_advisory_xact_lock_shared(%s, %s) AS lock;", + ), + ( + 9, + 4, + False, + False, + False, + False, + "SELECT pg_try_advisory_xact_lock(%s, %s) AS lock;", + ), + # session level + (10, None, False, True, False, True, "SELECT pg_advisory_lock(%s) AS lock;"), + (11, None, True, True, False, True, "SELECT pg_advisory_unlock(%s) AS lock;"), + ( + 12, + None, + False, + True, + True, + True, + "SELECT pg_advisory_lock_shared(%s) AS lock;", + ), + ( + 13, + None, + True, + True, + True, + True, + "SELECT pg_advisory_unlock_shared(%s) AS lock;", + ), + ( + 14, + None, + False, + True, + False, + False, + "SELECT pg_try_advisory_lock(%s) AS lock;", + ), + ( + 15, + None, + True, + True, + False, + False, + "SELECT pg_try_advisory_unlock(%s) AS lock;", + ), + ( + 16, + None, + False, + True, + True, + False, + "SELECT pg_try_advisory_lock_shared(%s) AS lock;", + ), + ( + 17, + None, + True, + True, + True, + False, + "SELECT pg_try_advisory_unlock_shared(%s) AS lock;", + ), + (18, 1, False, True, False, True, "SELECT pg_advisory_lock(%s, %s) AS lock;"), + (19, 2, True, True, False, True, "SELECT pg_advisory_unlock(%s, %s) AS lock;"), + ( + 20, + 3, + False, + True, + True, + True, + "SELECT pg_advisory_lock_shared(%s, %s) AS lock;", + ), + ( + 21, + 4, + True, + True, + True, + True, + "SELECT pg_advisory_unlock_shared(%s, %s) AS lock;", + ), + ( + 22, + 5, + False, + True, + False, + False, + "SELECT pg_try_advisory_lock(%s, %s) AS lock;", + ), + ( + 23, + 6, + True, + True, + False, + False, + "SELECT pg_try_advisory_unlock(%s, %s) AS lock;", + ), + ( + 24, + 7, + False, + True, + True, + False, + "SELECT 
pg_try_advisory_lock_shared(%s, %s) AS lock;", + ), + ( + 25, + 8, + True, + True, + True, + False, + "SELECT pg_try_advisory_unlock_shared(%s, %s) AS lock;", + ), + ], +) def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query): with patch("contentcuration.db.advisory_lock.connection") as conn: cursor = mock.Mock() @@ -61,7 +225,9 @@ def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query) conn.in_atomic_block.return_value = not session cursor.execute.return_value = True - with execute_lock(key1, key2=key2, unlock=unlock, session=session, shared=shared, wait=wait) as c: + with execute_lock( + key1, key2=key2, unlock=unlock, session=session, shared=shared, wait=wait + ) as c: assert c == cursor expected_params = [key1] @@ -73,22 +239,27 @@ def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query) assert params == expected_params -@mark.parametrize("unlock, in_atomic_block", [ - (False, False), - (True, False), - (True, True), -]) +@mark.parametrize( + "unlock, in_atomic_block", + [ + (False, False), + (True, False), + (True, True), + ], +) def test_execute_lock__not_implemented(unlock, in_atomic_block): with patch("contentcuration.db.advisory_lock.connection") as conn: conn.in_atomic_block = in_atomic_block with raises(NotImplementedError): - with execute_lock(99, key2=99, unlock=unlock, session=False, shared=False, wait=False): + with execute_lock( + 99, key2=99, unlock=unlock, session=False, shared=False, wait=False + ): pass -START_SIGNAL = 'START_SIGNAL' -END_SIGNAL = 'END_SIGNAL' +START_SIGNAL = "START_SIGNAL" +END_SIGNAL = "END_SIGNAL" SLEEP_SEC = 0.1 @@ -126,6 +297,7 @@ class AdvisoryLockDatabaseTest(SimpleTestCase): """ Test case that creates simultaneous locking situations """ + # this test manages its own transactions allow_database_queries = True diff --git a/contentcuration/contentcuration/tests/helpers.py b/contentcuration/contentcuration/tests/helpers.py index 73371135be..2635e79f56 100644 --- a/contentcuration/contentcuration/tests/helpers.py +++ b/contentcuration/contentcuration/tests/helpers.py @@ -1,4 +1,3 @@ -from builtins import str from importlib import import_module import mock @@ -52,4 +51,5 @@ def mock_class_instance(target): class MockClass(target_cls): def __new__(cls, *args, **kwargs): return mock.Mock(spec_set=cls) + return MockClass() diff --git a/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py b/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py index caec5669ae..acfabcffc9 100644 --- a/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py +++ b/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py @@ -6,16 +6,15 @@ class TestForwardAssessmentItemKeypairConstraint(StudioTestCase): - def test_prevent_two_identical_keypairs(self): contentnode = cc.ContentNode.objects.create(kind_id=exercise(), extra_fields={}) contentnode.save() - item1 = cc.AssessmentItem.objects.create(assessment_id='abc') + item1 = cc.AssessmentItem.objects.create(assessment_id="abc") item1.contentnode = contentnode item1.save() - item2 = cc.AssessmentItem.objects.create(assessment_id='abc') + item2 = cc.AssessmentItem.objects.create(assessment_id="abc") item2.contentnode = contentnode with pytest.raises(Exception) as execinfo: item2.save() - assert 'duplicate key value violates unique constraint' in str(execinfo.value) + assert "duplicate key value violates unique 
constraint" in str(execinfo.value) diff --git a/contentcuration/contentcuration/tests/test_asynctask.py b/contentcuration/contentcuration/tests/test_asynctask.py index 4496680f9c..79b239099b 100644 --- a/contentcuration/contentcuration/tests/test_asynctask.py +++ b/contentcuration/contentcuration/tests/test_asynctask.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import threading import time import uuid @@ -97,11 +95,14 @@ def _celery_task_worker(): # clear the "fixups" which would mess up the connection to the DB app.fixups = [] app._fixups = [] - app.worker_main(argv=[ - "worker", - "--task-events", - "--concurrency", "1", - ]) + app.worker_main( + argv=[ + "worker", + "--task-events", + "--concurrency", + "1", + ] + ) def _return_celery_task_object(task_id): @@ -116,6 +117,7 @@ class AsyncTaskTestCase(TransactionTestCase): This MUST use `serialized_rollback` due to DB transaction isolation interactions between the pytest framework and running the Celery worker in another thread """ + serialized_rollback = True @classmethod @@ -168,7 +170,9 @@ def test_asynctask_reports_success(self): self.assertEqual(celery_task_result.task_name, "test_task") self.assertEqual(async_result.status, states.SUCCESS) self.assertEqual(TaskResult.objects.get(task_id=async_result.id).result, "42") - self.assertEqual(TaskResult.objects.get(task_id=async_result.id).status, states.SUCCESS) + self.assertEqual( + TaskResult.objects.get(task_id=async_result.id).status, states.SUCCESS + ) def test_asynctask_reports_error(self): """ @@ -198,7 +202,9 @@ def test_only_create_async_task_creates_task_entry(self): async_result = plain_test_task.apply() result = self._wait_for(async_result) self.assertEquals(result, 42) - self.assertEquals(TaskResult.objects.filter(task_id=async_result.task_id).count(), 0) + self.assertEquals( + TaskResult.objects.filter(task_id=async_result.task_id).count(), 0 + ) @pytest.mark.skip(reason="This test is flaky on Github Actions") def test_fetch_or_enqueue_task(self): @@ -266,4 +272,4 @@ def test_revoke_task(self): try: TaskResult.objects.get(task_id=async_result.task_id, status=states.REVOKED) except TaskResult.DoesNotExist: - self.fail('Missing revoked task result') + self.fail("Missing revoked task result") diff --git a/contentcuration/contentcuration/tests/test_authentication.py b/contentcuration/contentcuration/tests/test_authentication.py index 5a8979d6ae..bae5a743c4 100644 --- a/contentcuration/contentcuration/tests/test_authentication.py +++ b/contentcuration/contentcuration/tests/test_authentication.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import urllib.parse from django.urls import reverse diff --git a/contentcuration/contentcuration/tests/test_channel_model.py b/contentcuration/contentcuration/tests/test_channel_model.py index 0014aeb41c..fd65c5753d 100755 --- a/contentcuration/contentcuration/tests/test_channel_model.py +++ b/contentcuration/contentcuration/tests/test_channel_model.py @@ -1,11 +1,8 @@ #!/usr/bin/env python -from __future__ import division - import json from datetime import datetime from django.urls import reverse_lazy -from past.utils import old_div from .base import BaseAPITestCase from .base import StudioTestCase @@ -160,7 +157,9 @@ def setUp(self): super(GetAllChannelsTestCase, self).setUp() # create 10 channels for comparison - self.channels = [Channel.objects.create(actor_id=self.admin_user.id) for _ in range(10)] + self.channels = [ + Channel.objects.create(actor_id=self.admin_user.id) for _ in range(10) + ] def 
test_returns_all_channels_in_the_db(self): """ @@ -179,7 +178,9 @@ class ChannelSetTestCase(BaseAPITestCase): def setUp(self): super(ChannelSetTestCase, self).setUp() self.channelset = mixer.blend(ChannelSet, editors=[self.user]) - self.channels = [Channel.objects.create(actor_id=self.user.id) for _ in range(10)] + self.channels = [ + Channel.objects.create(actor_id=self.user.id) for _ in range(10) + ] for chann in self.channels: chann.secret_tokens.add(self.channelset.secret_token) chann.editors.add(self.user) @@ -234,7 +235,7 @@ def test_save_channels_to_token(self): def test_public_endpoint(self): """ Make sure public endpoint returns all the channels under the token """ - published_channel_count = int(old_div(len(self.channels), 2)) + published_channel_count = int(len(self.channels) // 2) for c in self.channels[:published_channel_count]: c.main_tree.published = True c.main_tree.save() @@ -275,7 +276,9 @@ class ChannelMetadataSaveTestCase(StudioTestCase): def setUp(self): super(ChannelMetadataSaveTestCase, self).setUp() - self.channels = [Channel.objects.create(actor_id=self.admin_user.id) for _ in range(5)] + self.channels = [ + Channel.objects.create(actor_id=self.admin_user.id) for _ in range(5) + ] for c in self.channels: c.main_tree.changed = False c.main_tree.save() diff --git a/contentcuration/contentcuration/tests/test_channel_views.py b/contentcuration/contentcuration/tests/test_channel_views.py index 5ca54b28a3..439d7c8d95 100644 --- a/contentcuration/contentcuration/tests/test_channel_views.py +++ b/contentcuration/contentcuration/tests/test_channel_views.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from django.conf import settings from django.core.cache import cache from django.db import connection diff --git a/contentcuration/contentcuration/tests/test_chef_pipeline.py b/contentcuration/contentcuration/tests/test_chef_pipeline.py index 3a0e0ea0c9..26f98dcb67 100644 --- a/contentcuration/contentcuration/tests/test_chef_pipeline.py +++ b/contentcuration/contentcuration/tests/test_chef_pipeline.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import json from django.core.files.uploadedfile import SimpleUploadedFile @@ -195,7 +193,9 @@ def test_add_node_with_tags(self): node_data = node_json( {"kind": "video", "license": cc.License.objects.all()[0].license_name} ) - unique_title = "This is a title that we can almost certainly find uniquely later" + unique_title = ( + "This is a title that we can almost certainly find uniquely later" + ) node_data["tags"] = ["test"] node_data["title"] = unique_title response = self.post( diff --git a/contentcuration/contentcuration/tests/test_completion_criteria.py b/contentcuration/contentcuration/tests/test_completion_criteria.py index 6af5cdcfa6..a0daec10d7 100644 --- a/contentcuration/contentcuration/tests/test_completion_criteria.py +++ b/contentcuration/contentcuration/tests/test_completion_criteria.py @@ -15,15 +15,28 @@ def test_validate__success__empty(self): validate({}) def test_validate__fail__model(self): - with self.assertRaisesRegex(ValidationError, "model 'does not exist' is not one of"): + with self.assertRaisesRegex( + ValidationError, "model 'does not exist' is not one of" + ): validate({"model": "does not exist"}) def test_validate__fail__threshold(self): - with self.assertRaisesRegex(ValidationError, "object doesn't satisfy 'anyOf' conditions"): + with self.assertRaisesRegex( + ValidationError, "object doesn't satisfy 'anyOf' conditions" + ): validate({"model": completion_criteria.PAGES, 
"threshold": "not a number"}) def test_validate__content_kind(self): with self.assertRaisesRegex(ValidationError, "is invalid for content kind"): - validate({"model": completion_criteria.PAGES, "threshold": 1}, kind=content_kinds.EXERCISE) + validate( + {"model": completion_criteria.PAGES, "threshold": 1}, + kind=content_kinds.EXERCISE, + ) with self.assertRaisesRegex(ValidationError, "is invalid for content kind"): - validate({"model": completion_criteria.MASTERY, "threshold": {"mastery_model": mastery_criteria.DO_ALL}}, kind=content_kinds.DOCUMENT) + validate( + { + "model": completion_criteria.MASTERY, + "threshold": {"mastery_model": mastery_criteria.DO_ALL}, + }, + kind=content_kinds.DOCUMENT, + ) diff --git a/contentcuration/contentcuration/tests/test_contentnodes.py b/contentcuration/contentcuration/tests/test_contentnodes.py index d9b0ae235d..bc4f73b0b9 100644 --- a/contentcuration/contentcuration/tests/test_contentnodes.py +++ b/contentcuration/contentcuration/tests/test_contentnodes.py @@ -1,13 +1,7 @@ -from __future__ import absolute_import -from __future__ import division - import random import string import time import uuid -from builtins import range -from builtins import str -from builtins import zip import pytest from django.db import IntegrityError @@ -18,7 +12,6 @@ from le_utils.constants import format_presets from mixer.backend.django import mixer from mock import patch -from past.utils import old_div from . import testdata from .base import StudioTestCase @@ -44,7 +37,7 @@ def _create_nodes(num_nodes, title, parent=None, levels=2): for i in range(num_nodes): new_node = ContentNode.objects.create(title=title, parent=parent, kind=topic) # create a couple levels for testing purposes - if i > 0 and levels > 1 and i % (old_div(num_nodes, levels)) == 0: + if i > 0 and levels > 1 and i % (num_nodes // levels) == 0: parent = new_node @@ -187,12 +180,44 @@ def test_get_node_details(self): # assert format of list fields, including that they do not contain invalid data list_fields = [ - "kind_count", "languages", "accessible_languages", "licenses", "tags", "original_channels", - "authors", "aggregators", "providers", "copyright_holders" + "kind_count", + "languages", + "accessible_languages", + "licenses", + "tags", + "original_channels", + "authors", + "aggregators", + "providers", + "copyright_holders", ] for field in list_fields: - self.assertIsInstance(details.get(field), list, f"Field '{field}' isn't a list") - self.assertEqual(len(details[field]), len([value for value in details[field] if value]), f"List field '{field}' has falsy values") + self.assertIsInstance( + details.get(field), list, f"Field '{field}' isn't a list" + ) + self.assertEqual( + len(details[field]), + len([value for value in details[field] if value]), + f"List field '{field}' has falsy values", + ) + + def test_get_details_with_null_provenance_fields(self): + node = ContentNode.objects.create( + title="Null Fields Test", + parent=self.channel.main_tree, + kind=self.topic, + author=None, + provider=None, + aggregator=None, + copyright_holder=None, + ) + + details = node.get_details() + + assert details["authors"] == [] + assert details["providers"] == [] + assert details["aggregators"] == [] + assert details["copyright_holders"] == [] class NodeOperationsTestCase(StudioTestCase): @@ -831,7 +856,9 @@ def test_resync_after_more_subs_added(self): def _create_video_node(self, title, parent, withsubs=False): data = dict( - kind_id="video", title=title, node_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + 
kind_id="video", + title=title, + node_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", ) video_node = testdata.node(data, parent=parent) @@ -876,7 +903,9 @@ def _setup_original_and_deriative_nodes(self): # Setup derivative channel self.new_channel = Channel.objects.create( - name="derivative of teschannel", source_id="lkajs", actor_id=self.admin_user.id + name="derivative of teschannel", + source_id="lkajs", + actor_id=self.admin_user.id, ) self.new_channel.save() self.new_channel.main_tree = self._create_empty_tree() @@ -946,7 +975,7 @@ class NodeCompletionTestCase(StudioTestCase): }, "model": completion_criteria.MASTERY, } - } + }, } def setUp(self): @@ -967,30 +996,52 @@ def test_create_topic_set_complete_parent_no_title(self): def test_create_topic_set_complete_parent_title(self): channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.TOPIC, parent=channel.main_tree) + new_obj = ContentNode( + title="yes", kind_id=content_kinds.TOPIC, parent=channel.main_tree + ) new_obj.save() new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_video_set_complete_no_license(self): channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree) + new_obj = ContentNode( + title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_set_complete_custom_license_no_description(self): - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=custom_licenses[0], copyright_holder="Some person") + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=custom_licenses[0], + copyright_holder="Some person", + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_set_complete_custom_license_with_description(self): - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) channel = testdata.channel() new_obj = ContentNode( title="yes", @@ -998,50 +1049,109 @@ def test_create_video_set_complete_custom_license_with_description(self): parent=channel.main_tree, license_id=custom_licenses[0], license_description="don't do this!", - copyright_holder="Some person" + copyright_holder="Some person", ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) - def 
test_create_video_set_complete_copyright_holder_required_no_copyright_holder(self): - required_holder = list(License.objects.filter(copyright_holder_required=True, is_custom=False).values_list("pk", flat=True)) + def test_create_video_set_complete_copyright_holder_required_no_copyright_holder( + self, + ): + required_holder = list( + License.objects.filter( + copyright_holder_required=True, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=required_holder[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=required_holder[0], + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_set_complete_copyright_holder_required_copyright_holder(self): - required_holder = list(License.objects.filter(copyright_holder_required=True, is_custom=False).values_list("pk", flat=True)) + required_holder = list( + License.objects.filter( + copyright_holder_required=True, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=required_holder[0], copyright_holder="Some person") + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=required_holder[0], + copyright_holder="Some person", + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_video_no_files(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=licenses[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=licenses[0], + ) new_obj.save() new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_thumbnail_only(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=licenses[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=licenses[0], + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_THUMBNAIL, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_THUMBNAIL, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() 
self.assertFalse(new_obj.complete) def test_create_video_invalid_completion_criterion(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() new_obj = ContentNode( title="yes", @@ -1059,121 +1169,299 @@ def test_create_video_invalid_completion_criterion(self): }, "model": completion_criteria.MASTERY, } - } + }, }, ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_no_assessment_items(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_invalid_assessment_item_no_question(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, answers='[{"correct": true, "text": "answer"}]' + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_invalid_assessment_item_no_answers(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question") + AssessmentItem.objects.create( + contentnode=new_obj, question="This is a question" + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) + def 
test_create_exercise_valid_assessment_item_free_response_no_answers(self): + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) + channel = testdata.channel() + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) + new_obj.save() + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + type=exercises.FREE_RESPONSE, + ) + new_obj.mark_complete() + self.assertTrue(new_obj.complete) + def test_create_exercise_invalid_assessment_item_no_correct_answers(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": false, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": false, "text": "answer"}]', + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_valid_assessment_item_no_correct_answers_input(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() AssessmentItem.objects.create( contentnode=new_obj, question="This is a question", - answers="[{\"correct\": false, \"text\": \"answer\"}]", - type=exercises.INPUT_QUESTION + answers='[{"correct": false, "text": "answer"}]', + type=exercises.INPUT_QUESTION, + ) + new_obj.mark_complete() + self.assertTrue(new_obj.complete) + + def test_create_exercise_valid_assessment_item_true_false(self): + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) + channel = testdata.channel() + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) + new_obj.save() + AssessmentItem.objects.create( + contentnode=new_obj, + question="True?", + answers='[{"answer":"True","correct":true,"order":1},{"answer":"False","correct":false,"order":2}]', + type="true_false", ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_valid_assessment_items(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = 
list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_valid_assessment_items_raw_data(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, raw_data="{\"question\": {}}") + AssessmentItem.objects.create(contentnode=new_obj, raw_data='{"question": {}}') new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_no_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_old_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.old_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.old_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + 
question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_bad_new_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields={ - "randomize": False, - "options": { - "completion_criteria": { - "threshold": { - "mastery_model": exercises.M_OF_N, - "n": 5, - }, - "model": completion_criteria.MASTERY, - } - } - }) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields={ + "randomize": False, + "options": { + "completion_criteria": { + "threshold": { + "mastery_model": exercises.M_OF_N, + "n": 5, + }, + "model": completion_criteria.MASTERY, + } + }, + }, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_null_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() new_obj = ContentNode( title="yes", @@ -1184,7 +1472,11 @@ def test_create_video_null_extra_fields(self): extra_fields=None, ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) try: new_obj.mark_complete() except AttributeError: diff --git a/contentcuration/contentcuration/tests/test_createchannel.py b/contentcuration/contentcuration/tests/test_createchannel.py index 01405e2622..ec28381e6b 100644 --- a/contentcuration/contentcuration/tests/test_createchannel.py +++ b/contentcuration/contentcuration/tests/test_createchannel.py @@ -1,8 +1,4 @@ -from __future__ import absolute_import - import json -from builtins import range -from builtins import str import requests from django.urls import reverse_lazy @@ -61,11 +57,17 @@ def setUp(self): super(CreateChannelTestCase, self).setUpBase() self.topic = models.ContentKind.objects.get(kind="topic") self.license = models.License.objects.all()[0] - self.fileinfo_audio = create_studio_file("abc", preset='audio', ext='mp3') - self.fileinfo_video = create_studio_file("def", preset='high_res_video', ext='mp4') - self.fileinfo_video_webm = create_studio_file("ghi", preset='high_res_video', ext='webm') - self.fileinfo_document = create_studio_file("jkl", preset='document', ext='pdf') - self.fileinfo_exercise = create_studio_file("mno", preset='exercise', ext='perseus') + self.fileinfo_audio = create_studio_file("abc", preset="audio", ext="mp3") + self.fileinfo_video = create_studio_file( + "def", preset="high_res_video", ext="mp4" + ) + self.fileinfo_video_webm = 
create_studio_file( + "ghi", preset="high_res_video", ext="webm" + ) + self.fileinfo_document = create_studio_file("jkl", preset="document", ext="pdf") + self.fileinfo_exercise = create_studio_file( + "mno", preset="exercise", ext="perseus" + ) def create_channel(self): create_channel_url = str(reverse_lazy("api_create_channel")) diff --git a/contentcuration/contentcuration/tests/test_decorators.py b/contentcuration/contentcuration/tests/test_decorators.py index 2c795716d7..e1a6ded135 100644 --- a/contentcuration/contentcuration/tests/test_decorators.py +++ b/contentcuration/contentcuration/tests/test_decorators.py @@ -20,4 +20,6 @@ def do_test(): mock_task.fetch_or_enqueue.assert_not_called() do_test() - mock_task.fetch_or_enqueue.assert_called_once_with(self.user, user_id=self.user.id) + mock_task.fetch_or_enqueue.assert_called_once_with( + self.user, user_id=self.user.id + ) diff --git a/contentcuration/contentcuration/tests/test_exportchannel.py b/contentcuration/contentcuration/tests/test_exportchannel.py index 71b09bda94..5c850597d7 100644 --- a/contentcuration/contentcuration/tests/test_exportchannel.py +++ b/contentcuration/contentcuration/tests/test_exportchannel.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import json import os import random @@ -17,6 +15,7 @@ from kolibri_content.router import get_active_content_database from kolibri_content.router import set_active_content_database from le_utils.constants import exercises +from le_utils.constants import format_presets from le_utils.constants.labels import accessibility_categories from le_utils.constants.labels import learning_activities from le_utils.constants.labels import levels @@ -32,8 +31,10 @@ from .testdata import node as create_node from .testdata import slideshow from .testdata import thumbnail_bytes +from .testdata import tree from contentcuration import models as cc from contentcuration.models import CustomTaskMetadata +from contentcuration.utils.assessment.qti.archive import hex_to_qti_id from contentcuration.utils.celery.tasks import generate_task_signature from contentcuration.utils.publish import ChannelIncompleteError from contentcuration.utils.publish import convert_channel_thumbnail @@ -42,6 +43,8 @@ from contentcuration.utils.publish import fill_published_fields from contentcuration.utils.publish import map_prerequisites from contentcuration.utils.publish import MIN_SCHEMA_VERSION +from contentcuration.utils.publish import NoneContentNodeTreeError +from contentcuration.utils.publish import publish_channel from contentcuration.utils.publish import set_channel_icon_encoding from contentcuration.viewsets.base import create_change_tracker @@ -53,11 +56,10 @@ def description(): class ExportChannelTestCase(StudioTestCase): - @classmethod def setUpClass(cls): super(ExportChannelTestCase, cls).setUpClass() - cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database') + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") cls.patch_copy_db.start() @classmethod @@ -74,39 +76,57 @@ def setUp(self): self.content_channel.save() # Add some incomplete nodes to ensure they don't get published. 
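The fixtures below exercise the publish completeness rule: a node is exported only when it and every ancestor up to the channel root are marked complete, so even a complete child under an incomplete topic is excluded. A minimal sketch of that rule, using a hypothetical `Node` and `is_exportable` helper rather than Studio's actual publish code:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Node:
    complete: bool
    parent: Optional["Node"] = None


def is_exportable(node: Node) -> bool:
    """A node is published only if it and every ancestor are complete."""
    current: Optional[Node] = node
    while current is not None:
        if not current.complete:
            return False
        current = current.parent
    return True


root = Node(complete=True)
incomplete_topic = Node(complete=False, parent=root)
complete_video = Node(complete=True, parent=incomplete_topic)
assert not is_exportable(complete_video)  # excluded despite being complete itself
```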
- new_node = create_node({'kind_id': 'topic', 'title': 'Incomplete topic', 'children': []}) + new_node = create_node( + {"kind_id": "topic", "title": "Incomplete topic", "children": []} + ) new_node.complete = False new_node.parent = self.content_channel.main_tree new_node.save() - new_video = create_node({'kind_id': 'video', 'title': 'Incomplete video', 'children': []}) + new_video = create_node( + {"kind_id": "video", "title": "Incomplete video", "children": []} + ) new_video.complete = False new_video.parent = new_node new_video.save() # Add a complete node within an incomplete node to ensure it's excluded. - new_video = create_node({'kind_id': 'video', 'title': 'Complete video', 'children': []}) + new_video = create_node( + {"kind_id": "video", "title": "Complete video", "children": []} + ) new_video.complete = True new_video.parent = new_node new_video.save() # Add a node with tags greater than 30 chars to ensure they get excluded. - new_video = create_node({'kind_id': 'video', 'tags': [{'tag_name': 'kolbasdasdasrissadasdwzxcztudio'}, {'tag_name': 'kolbasdasdasrissadasdwzxcztudi'}, - {'tag_name': 'kolbasdasdasrissadasdwzxc'}], 'title': 'kolibri tag test', 'children': []}) + new_video = create_node( + { + "kind_id": "video", + "tags": [ + {"tag_name": "kolbasdasdasrissadasdwzxcztudio"}, + {"tag_name": "kolbasdasdasrissadasdwzxcztudi"}, + {"tag_name": "kolbasdasdasrissadasdwzxc"}, + ], + "title": "kolibri tag test", + "children": [], + } + ) new_video.complete = True new_video.parent = self.content_channel.main_tree new_video.save() # Add a node to test completion criteria. extra_fields = { - "options": { - "completion_criteria": { - "model": "time", - "threshold": 20 - } - } + "options": {"completion_criteria": {"model": "time", "threshold": 20}} } - new_video = create_node({'kind_id': 'video', 'title': 'Completion criteria test', 'extra_fields': extra_fields, 'children': []}) + new_video = create_node( + { + "kind_id": "video", + "title": "Completion criteria test", + "extra_fields": extra_fields, + "children": [], + } + ) new_video.complete = True new_video.parent = self.content_channel.main_tree new_video.save() @@ -120,29 +140,41 @@ def setUp(self): "m": 1, "n": 2, "mastery_model": exercises.M_OF_N, - } + }, } } } current_exercise = cc.ContentNode.objects.filter(kind_id="exercise").first() - new_exercise = create_node({'kind_id': 'exercise', 'title': 'Mastery test', 'extra_fields': extra_fields}) + new_exercise = create_node( + { + "kind_id": "exercise", + "title": "Mastery test", + "extra_fields": extra_fields, + } + ) new_exercise.complete = True new_exercise.parent = current_exercise.parent new_exercise.save() - bad_container = create_node({'kind_id': 'topic', 'title': 'Bad topic container', 'children': []}) + bad_container = create_node( + {"kind_id": "topic", "title": "Bad topic container", "children": []} + ) bad_container.complete = True bad_container.parent = self.content_channel.main_tree bad_container.save() # exercise without mastery model, but marked as complete - broken_exercise = create_node({'kind_id': 'exercise', 'title': 'Bad mastery test', 'extra_fields': {}}) + broken_exercise = create_node( + {"kind_id": "exercise", "title": "Bad mastery test", "extra_fields": {}} + ) broken_exercise.complete = True broken_exercise.parent = bad_container broken_exercise.save() - thumbnail_data = create_studio_file(thumbnail_bytes, preset="exercise_thumbnail", ext="png") + thumbnail_data = create_studio_file( + thumbnail_bytes, preset="exercise_thumbnail", ext="png" + ) file_obj 
= thumbnail_data["db_file"] file_obj.contentnode = new_exercise file_obj.save() @@ -152,17 +184,25 @@ def setUp(self): ai.save() legacy_extra_fields = { - 'mastery_model': exercises.M_OF_N, - 'randomize': True, - 'm': 1, - 'n': 2 + "mastery_model": exercises.M_OF_N, + "randomize": True, + "m": 1, + "n": 2, } - legacy_exercise = create_node({'kind_id': 'exercise', 'title': 'Legacy Mastery test', 'extra_fields': legacy_extra_fields}) + legacy_exercise = create_node( + { + "kind_id": "exercise", + "title": "Legacy Mastery test", + "extra_fields": legacy_extra_fields, + } + ) legacy_exercise.complete = True legacy_exercise.parent = current_exercise.parent legacy_exercise.save() - thumbnail_data = create_studio_file(thumbnail_bytes, preset="exercise_thumbnail", ext="png") + thumbnail_data = create_studio_file( + thumbnail_bytes, preset="exercise_thumbnail", ext="png" + ) file_obj = thumbnail_data["db_file"] file_obj.contentnode = legacy_exercise file_obj.save() @@ -171,15 +211,57 @@ def setUp(self): ai.contentnode = legacy_exercise ai.save() + # Add an exercise with free response question to test QTI generation + qti_extra_fields = { + "options": { + "completion_criteria": { + "model": "mastery", + "threshold": { + "m": 1, + "n": 2, + "mastery_model": exercises.M_OF_N, + }, + } + } + } + qti_exercise = create_node( + { + "kind_id": "exercise", + "title": "QTI Free Response Exercise", + "extra_fields": qti_extra_fields, + } + ) + qti_exercise.complete = True + qti_exercise.parent = current_exercise.parent + qti_exercise.save() + + # Create a free response assessment item + cc.AssessmentItem.objects.create( + contentnode=qti_exercise, + assessment_id=uuid.uuid4().hex, + type=exercises.FREE_RESPONSE, + question="What is the capital of France?", + answers=json.dumps([{"answer": "Paris", "correct": True}]), + hints=json.dumps([]), + raw_data="{}", + order=4, + randomize=False, + ) + + for ai in current_exercise.assessment_items.all()[:2]: + ai.id = None + ai.contentnode = qti_exercise + ai.save() + first_topic = self.content_channel.main_tree.get_descendants().first() # Add a publishable topic to ensure it does not inherit but that its children do - new_node = create_node({'kind_id': 'topic', 'title': 'Disinherited topic'}) + new_node = create_node({"kind_id": "topic", "title": "Disinherited topic"}) new_node.complete = True new_node.parent = first_topic new_node.save() - new_video = create_node({'kind_id': 'video', 'title': 'Inheriting video'}) + new_video = create_node({"kind_id": "video", "title": "Inheriting video"}) new_video.complete = True new_video.parent = new_node new_video.save() @@ -223,7 +305,9 @@ def setUp(self): first_topic_first_child.save() set_channel_icon_encoding(self.content_channel) - self.tempdb = create_content_database(self.content_channel, True, self.admin_user.id, True) + self.tempdb = create_content_database( + self.content_channel, True, self.admin_user.id, True + ) set_active_content_database(self.tempdb) @@ -249,7 +333,9 @@ def test_contentnode_license_data(self): for node in nodes: if node.license: self.assertEqual(node.license_name, node.license.license_name) - self.assertEqual(node.license_description, node.license.license_description) + self.assertEqual( + node.license_description, node.license.license_description + ) def test_contentnode_incomplete_not_published(self): kolibri_nodes = kolibri_models.ContentNode.objects.all() @@ -272,10 +358,15 @@ def test_contentnode_incomplete_not_published(self): assert kolibri_nodes.filter(pk=node.node_id).count() == 0 # bad 
exercise node should not be published (technically incomplete) - assert kolibri_models.ContentNode.objects.filter(title='Bad mastery test').count() == 0 + assert ( + kolibri_models.ContentNode.objects.filter(title="Bad mastery test").count() + == 0 + ) def test_tags_greater_than_30_excluded(self): - tag_node = kolibri_models.ContentNode.objects.filter(title='kolibri tag test').first() + tag_node = kolibri_models.ContentNode.objects.filter( + title="kolibri tag test" + ).first() published_tags = tag_node.tags.all() assert published_tags.count() == 2 @@ -283,19 +374,25 @@ def test_tags_greater_than_30_excluded(self): assert len(t.tag_name) <= 30 def test_duration_override_on_completion_criteria_time(self): - completion_criteria_node = kolibri_models.ContentNode.objects.filter(title='Completion criteria test').first() - non_completion_criteria_node = kolibri_models.ContentNode.objects.filter(title='kolibri tag test').first() + completion_criteria_node = kolibri_models.ContentNode.objects.filter( + title="Completion criteria test" + ).first() + non_completion_criteria_node = kolibri_models.ContentNode.objects.filter( + title="kolibri tag test" + ).first() assert completion_criteria_node.duration == 20 assert non_completion_criteria_node.duration == 100 def test_completion_criteria_set(self): - completion_criteria_node = kolibri_models.ContentNode.objects.filter(title='Completion criteria test').first() + completion_criteria_node = kolibri_models.ContentNode.objects.filter( + title="Completion criteria test" + ).first() - self.assertEqual(completion_criteria_node.options["completion_criteria"], { - "model": "time", - "threshold": 20 - }) + self.assertEqual( + completion_criteria_node.options["completion_criteria"], + {"model": "time", "threshold": 20}, + ) def test_contentnode_channel_id_data(self): channel = kolibri_models.ChannelMetadata.objects.first() @@ -313,140 +410,244 @@ def test_contentnode_file_checksum_data(self): def test_contentnode_file_extension_data(self): files = kolibri_models.File.objects.all() assert files.count() > 0 - for file in files.prefetch_related('local_file'): + for file in files.prefetch_related("local_file"): self.assertEqual(file.extension, file.local_file.extension) def test_contentnode_file_size_data(self): files = kolibri_models.File.objects.all() assert files.count() > 0 - for file in files.prefetch_related('local_file'): + for file in files.prefetch_related("local_file"): self.assertEqual(file.file_size, file.local_file.file_size) def test_channel_icon_encoding(self): self.assertIsNotNone(self.content_channel.icon_encoding) def test_assessment_metadata(self): - for i, exercise in enumerate(kolibri_models.ContentNode.objects.filter(kind="exercise")): + for i, exercise in enumerate( + kolibri_models.ContentNode.objects.filter(kind="exercise") + ): asm = exercise.assessmentmetadata.first() self.assertTrue(isinstance(asm.assessment_item_ids, list)) mastery = asm.mastery_model self.assertTrue(isinstance(mastery, dict)) - self.assertEqual(mastery["type"], exercises.DO_ALL if i == 0 else exercises.M_OF_N) + self.assertEqual( + mastery["type"], exercises.DO_ALL if i == 0 else exercises.M_OF_N + ) self.assertEqual(mastery["m"], 3 if i == 0 else 1) self.assertEqual(mastery["n"], 3 if i == 0 else 2) def test_inherited_language(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id)[1:]: + first_topic_node_id = ( + 
self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + )[1:]: if child.kind == "topic": - self.assertIsNone(child.lang_id) + self.assertEqual(child.lang_id, self.content_channel.language_id) self.assertEqual(child.children.first().lang_id, "fr") else: self.assertEqual(child.lang_id, "fr") def test_inherited_language_no_overwrite(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() self.assertEqual(first_child.lang_id, "sw") def test_inherited_category(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id)[1:]: + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + )[1:]: if child.kind == "topic": self.assertIsNone(child.categories) - self.assertEqual(child.children.first().categories, subjects.MATHEMATICS) + self.assertEqual( + child.children.first().categories, subjects.MATHEMATICS + ) else: self.assertEqual(child.categories, subjects.MATHEMATICS) def test_inherited_category_no_overwrite(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() self.assertEqual(first_child.categories, subjects.ALGEBRA) def test_inherited_needs(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id)[1:]: + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + )[1:]: if child.kind == "topic": self.assertIsNone(child.learner_needs) - self.assertEqual(child.children.first().learner_needs, needs.PRIOR_KNOWLEDGE) + self.assertEqual( + child.children.first().learner_needs, needs.PRIOR_KNOWLEDGE + ) else: self.assertEqual(child.learner_needs, needs.PRIOR_KNOWLEDGE) def test_inherited_needs_no_overwrite(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() self.assertEqual(first_child.learner_needs, needs.FOR_BEGINNERS) def test_topics_no_accessibility_label(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) topic = 
kolibri_models.ContentNode.objects.get(id=first_topic_node_id) self.assertIsNone(topic.accessibility_labels) def test_child_no_inherit_accessibility_label(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() # Should only be the accessibility labels we set on the child directly, not any parent ones. - self.assertEqual(first_child.accessibility_labels, accessibility_categories.CAPTIONS_SUBTITLES) + self.assertEqual( + first_child.accessibility_labels, + accessibility_categories.CAPTIONS_SUBTITLES, + ) def test_inherited_grade_levels(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id): + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ): if child.kind == "topic": self.assertIsNone(child.grade_levels) - self.assertEqual(child.children.first().grade_levels, levels.LOWER_SECONDARY) + self.assertEqual( + child.children.first().grade_levels, levels.LOWER_SECONDARY + ) else: self.assertEqual(child.grade_levels, levels.LOWER_SECONDARY) def test_inherited_resource_types(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id): + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ): if child.kind == "topic": self.assertIsNone(child.resource_types) - self.assertEqual(child.children.first().resource_types, resource_type.LESSON_PLAN) + self.assertEqual( + child.children.first().resource_types, resource_type.LESSON_PLAN + ) else: self.assertEqual(child.resource_types, resource_type.LESSON_PLAN) def test_topics_no_learning_activity(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) topic = kolibri_models.ContentNode.objects.get(id=first_topic_node_id) self.assertIsNone(topic.learning_activities)
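The inheritance tests above all assert one cascade rule: at publish time, metadata set on a topic flows down to descendant resources, a value the child set explicitly is never overwritten, and some fields (learning activities, accessibility labels) do not cascade at all. A minimal sketch of that rule, with `inherit_field` as a hypothetical helper rather than Studio's publish logic:

```python
def inherit_field(child_value, parent_value, inheritable=True):
    """Return the value a published child ends up with under the cascade rule."""
    if not inheritable:
        # e.g. learning activities / accessibility labels never cascade
        return child_value
    # Inheritable fields fall back to the parent only when unset on the child
    return child_value if child_value is not None else parent_value


assert inherit_field(None, "fr") == "fr"  # language inherited from the topic
assert inherit_field("sw", "fr") == "sw"  # a child's own language is kept
assert inherit_field(None, "read", inheritable=False) is None  # no cascade
```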
def test_child_no_inherit_learning_activity(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() # Should only be the learning activities we set on the child directly, not any parent ones. self.assertEqual(first_child.learning_activities, learning_activities.LISTEN) def test_publish_no_modify_exercise_extra_fields(self): exercise = cc.ContentNode.objects.get(title="Mastery test") - self.assertEqual(exercise.extra_fields["options"]["completion_criteria"]["threshold"], { - "m": 1, - "n": 2, - "mastery_model": exercises.M_OF_N, - }) - published_exercise = kolibri_models.ContentNode.objects.get(title="Mastery test") - self.assertEqual(published_exercise.options["completion_criteria"]["threshold"], { - "m": 1, - "n": 2, - "mastery_model": exercises.M_OF_N, - }) + self.assertEqual( + exercise.extra_fields["options"]["completion_criteria"]["threshold"], + { + "m": 1, + "n": 2, + "mastery_model": exercises.M_OF_N, + }, + ) + published_exercise = kolibri_models.ContentNode.objects.get( + title="Mastery test" + ) + self.assertEqual( + published_exercise.options["completion_criteria"]["threshold"], + { + "m": 1, + "n": 2, + "mastery_model": exercises.M_OF_N, + }, + ) def test_publish_no_modify_legacy_exercise_extra_fields(self): current_exercise = cc.ContentNode.objects.get(title="Legacy Mastery test") - self.assertEqual(current_exercise.extra_fields, { - 'mastery_model': exercises.M_OF_N, - 'randomize': True, - 'm': 1, - 'n': 2 - }) + self.assertEqual( + current_exercise.extra_fields, + {"mastery_model": exercises.M_OF_N, "randomize": True, "m": 1, "n": 2}, + ) + def test_qti_exercise_generates_qti_archive(self): + """Test that exercises with free response questions generate QTI archive files.""" + qti_exercise = cc.ContentNode.objects.get(title="QTI Free Response Exercise") -class EmptyChannelTestCase(StudioTestCase): + # Check that a QTI archive file was created + qti_files = qti_exercise.files.filter(preset_id=format_presets.QTI_ZIP) + self.assertEqual( + qti_files.count(), + 1, + "QTI exercise should have exactly one QTI archive file", + ) + + qti_file = qti_files.first() + self.assertIsNotNone( + qti_file.file_on_disk, "QTI file should have file_on_disk content" + ) + self.assertTrue( + qti_file.original_filename.endswith(".zip"), + "QTI file should be a zip archive", + ) + + def test_qti_archive_contains_manifest_and_assessment_ids(self): + published_qti_exercise = kolibri_models.ContentNode.objects.get( + title="QTI Free Response Exercise" + ) + assessment_ids = ( + published_qti_exercise.assessmentmetadata.first().assessment_item_ids + ) + + # Should have exactly three assessment IDs: the two copied items plus our free response question + self.assertEqual( + len(assessment_ids), 3, "Should have exactly three assessment IDs" + ) + + # Each assessment ID should match the QTI identifier derived from its assessment item + qti_exercise = cc.ContentNode.objects.get(title="QTI Free Response Exercise") + for i, ai in enumerate(qti_exercise.assessment_items.order_by("order")): + self.assertEqual(assessment_ids[i], hex_to_qti_id(ai.assessment_id)) + +
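The loop above compares each published assessment ID against `hex_to_qti_id(ai.assessment_id)`. QTI identifiers are XML NCNames, which cannot begin with a digit, so some deterministic transformation of Studio's 32-character hex IDs is required; the sketch below only illustrates that general shape and is not the actual implementation from `contentcuration.utils.assessment.qti.archive`:

```python
def hex_to_qti_id(hex_id: str) -> str:
    """Illustrative sketch only: map a 32-char hex UUID to a QTI identifier.

    QTI identifiers are XML NCNames and must not begin with a digit, so a
    deterministic letter prefix guarantees validity.
    """
    return "x" + hex_id


assert hex_to_qti_id("ab684452f2ad4ba6a1426d6410139f60") == "xab684452f2ad4ba6a1426d6410139f60"
```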
+class EmptyChannelTestCase(StudioTestCase): @classmethod def setUpClass(cls): super(EmptyChannelTestCase, cls).setUpClass() - cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database') + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") cls.patch_copy_db.start() @classmethod @@ -481,10 +682,9 @@ def setUp(self): fh, output_db = tempfile.mkstemp(suffix=".sqlite3") self.output_db = output_db set_active_content_database(self.output_db) - call_command("migrate", - "content", - database=get_active_content_database(), - no_input=True) + call_command( + "migrate", "content", database=get_active_content_database(), no_input=True + ) def tearDown(self): # Clean up database connection after the test @@ -501,23 +701,39 @@ def test_convert_channel_thumbnail_empty_thumbnail(self): self.assertEqual("", convert_channel_thumbnail(channel)) def test_convert_channel_thumbnail_static_thumbnail(self): - channel = cc.Channel.objects.create(thumbnail="/static/kolibri_flapping_bird.png", actor_id=self.admin_user.id) + channel = cc.Channel.objects.create( + thumbnail="/static/kolibri_flapping_bird.png", actor_id=self.admin_user.id + ) self.assertEqual("", convert_channel_thumbnail(channel)) def test_convert_channel_thumbnail_encoding_valid(self): channel = cc.Channel.objects.create( - thumbnail="/content/kolibri_flapping_bird.png", thumbnail_encoding={"base64": "flappy_bird"}, actor_id=self.admin_user.id) + thumbnail="/content/kolibri_flapping_bird.png", + thumbnail_encoding={"base64": "flappy_bird"}, + actor_id=self.admin_user.id, + ) self.assertEqual("flappy_bird", convert_channel_thumbnail(channel)) def test_convert_channel_thumbnail_encoding_invalid(self): - with patch("contentcuration.utils.publish.get_thumbnail_encoding", return_value="this is a test"): - channel = cc.Channel.objects.create(thumbnail="/content/kolibri_flapping_bird.png", thumbnail_encoding={}, actor_id=self.admin_user.id) + with patch( + "contentcuration.utils.publish.get_thumbnail_encoding", + return_value="this is a test", + ): + channel = cc.Channel.objects.create( + thumbnail="/content/kolibri_flapping_bird.png", + thumbnail_encoding={}, + actor_id=self.admin_user.id, + ) self.assertEqual("this is a test", convert_channel_thumbnail(channel)) def test_create_slideshow_manifest(self): - ccnode = cc.ContentNode.objects.create(kind_id=slideshow(), extra_fields={}, complete=True) + ccnode = cc.ContentNode.objects.create( + kind_id=slideshow(), extra_fields={}, complete=True + ) create_slideshow_manifest(ccnode) - manifest_collection = cc.File.objects.filter(contentnode=ccnode, preset_id=u"slideshow_manifest") + manifest_collection = cc.File.objects.filter( + contentnode=ccnode, preset_id=u"slideshow_manifest" + ) assert len(manifest_collection) == 1 @@ -525,7 +741,7 @@ class ChannelExportPrerequisiteTestCase(StudioTestCase): @classmethod def setUpClass(cls): super(ChannelExportPrerequisiteTestCase, cls).setUpClass() - cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database') + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") cls.patch_copy_db.start() def setUp(self): @@ -533,10 +749,9 @@ def setUp(self): fh, output_db = tempfile.mkstemp(suffix=".sqlite3") self.output_db = output_db set_active_content_database(self.output_db) - call_command("migrate", - "content", - database=get_active_content_database(), - no_input=True) + call_command( + "migrate", "content", database=get_active_content_database(), no_input=True + ) def tearDown(self): # Clean up database connection after the test @@ -549,10 +764,14 @@ def tearDown(self): def test_nonexistent_prerequisites(self): channel = cc.Channel.objects.create(actor_id=self.admin_user.id) - node1 = cc.ContentNode.objects.create(kind_id="exercise", parent_id=channel.main_tree.pk, 
complete=True) + node1 = cc.ContentNode.objects.create( + kind_id="exercise", parent_id=channel.main_tree.pk, complete=True + ) exercise = cc.ContentNode.objects.create(kind_id="exercise", complete=True) - cc.PrerequisiteContentRelationship.objects.create(target_node=exercise, prerequisite=node1) + cc.PrerequisiteContentRelationship.objects.create( + target_node=exercise, prerequisite=node1 + ) map_prerequisites(node1) @@ -564,7 +783,7 @@ def test_fill_published_fields(self): fill_published_fields(channel, version_notes) self.assertTrue(channel.published_data) self.assertIsNotNone(channel.published_data.get(0)) - self.assertEqual(channel.published_data[0]['version_notes'], version_notes) + self.assertEqual(channel.published_data[0]["version_notes"], version_notes) class PublishFailCleansUpTaskObjects(StudioTestCase): @@ -573,12 +792,14 @@ def setUp(self): def test_failed_task_objects_cleaned_up_when_publishing(self): channel_id = self.channel.id - task_name = 'export-channel' + task_name = "export-channel" task_id = uuid.uuid4().hex - pk = 'ab684452f2ad4ba6a1426d6410139f60' - table = 'channel' - task_kwargs = json.dumps({'pk': pk, 'table': table}) - signature = generate_task_signature(task_name, task_kwargs=task_kwargs, channel_id=channel_id) + pk = "ab684452f2ad4ba6a1426d6410139f60" + table = "channel" + task_kwargs = json.dumps({"pk": pk, "table": table}) + signature = generate_task_signature( + task_name, task_kwargs=task_kwargs, channel_id=channel_id + ) TaskResult.objects.create( task_id=task_id, @@ -587,10 +808,7 @@ def test_failed_task_objects_cleaned_up_when_publishing(self): ) CustomTaskMetadata.objects.create( - task_id=task_id, - channel_id=channel_id, - user=self.user, - signature=signature + task_id=task_id, channel_id=channel_id, user=self.user, signature=signature ) assert TaskResult.objects.filter(task_id=task_id).exists() @@ -599,6 +817,151 @@ def test_failed_task_objects_cleaned_up_when_publishing(self): with create_change_tracker(pk, table, channel_id, self.user, task_name): assert not TaskResult.objects.filter(task_id=task_id).exists() assert not CustomTaskMetadata.objects.filter(task_id=task_id).exists() - new_task_result = TaskResult.objects.filter(task_name=task_name, status=states.STARTED).first() - new_custom_task_metadata = CustomTaskMetadata.objects.get(channel_id=channel_id, user=self.user, signature=signature) + new_task_result = TaskResult.objects.filter( + task_name=task_name, status=states.STARTED + ).first() + new_custom_task_metadata = CustomTaskMetadata.objects.get( + channel_id=channel_id, user=self.user, signature=signature + ) assert new_custom_task_metadata.task_id == new_task_result.task_id + + +class PublishStagingTreeTestCase(StudioTestCase): + @classmethod + def setUpClass(cls): + super(PublishStagingTreeTestCase, cls).setUpClass() + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") + cls.mock_save_export = cls.patch_copy_db.start() + + @classmethod + def tearDownClass(cls): + super(PublishStagingTreeTestCase, cls).tearDownClass() + cls.patch_copy_db.stop() + + def setUp(self): + super(PublishStagingTreeTestCase, self).setUp() + + self.channel_version = 3 + self.incomplete_video_in_staging = "Incomplete video in staging tree" + self.complete_video_in_staging = "Complete video in staging tree" + self.incomplete_video_in_main = "Incomplete video in main tree" + self.complete_video_in_main = "Complete video in main tree" + + self.content_channel = channel() + self.content_channel.staging_tree = tree() + 
self.content_channel.version = self.channel_version + self.content_channel.save() + + # Incomplete node should be excluded. + new_node = create_node( + { + "kind_id": "video", + "title": self.incomplete_video_in_staging, + "children": [], + } + ) + new_node.complete = False + new_node.parent = self.content_channel.staging_tree + new_node.published = False + new_node.save() + + # Complete node should be included. + new_video = create_node( + { + "kind_id": "video", + "title": self.complete_video_in_staging, + "children": [], + } + ) + new_video.complete = True + new_video.parent = self.content_channel.staging_tree + new_video.published = False + new_video.save() + + # Incomplete node in main_tree. + new_node = create_node( + {"kind_id": "video", "title": self.incomplete_video_in_main, "children": []} + ) + new_node.complete = False + new_node.parent = self.content_channel.main_tree + new_node.published = False + new_node.save() + + # Complete node in main_tree. + new_node = create_node( + {"kind_id": "video", "title": self.complete_video_in_main, "children": []} + ) + new_node.complete = True + new_node.parent = self.content_channel.main_tree + new_node.published = False + new_node.save() + + def run_publish_channel(self): + publish_channel( + self.admin_user.id, + self.content_channel.id, + version_notes="", + force=False, + force_exercises=False, + send_email=False, + progress_tracker=None, + language="fr", + use_staging_tree=True, + ) + + def test_none_staging_tree(self): + self.content_channel.staging_tree = None + self.content_channel.save() + with self.assertRaises(NoneContentNodeTreeError): + self.run_publish_channel() + + def test_staging_tree_published(self): + self.assertFalse(self.content_channel.staging_tree.published) + self.run_publish_channel() + self.content_channel.refresh_from_db() + self.assertTrue(self.content_channel.staging_tree.published) + + def test_next_version_exported(self): + self.run_publish_channel() + self.mock_save_export.assert_called_with( + self.content_channel.id, + "next", + True, + ) + + def test_main_tree_not_impacted(self): + self.assertFalse(self.content_channel.main_tree.published) + self.run_publish_channel() + self.content_channel.refresh_from_db() + self.assertFalse(self.content_channel.main_tree.published) + + def test_channel_version_not_incremented(self): + self.assertEqual(self.content_channel.version, self.channel_version) + self.run_publish_channel() + self.content_channel.refresh_from_db() + self.assertEqual(self.content_channel.version, self.channel_version) + + def test_staging_tree_used_for_publish(self): + set_channel_icon_encoding(self.content_channel) + self.tempdb = create_content_database( + self.content_channel, + True, + self.admin_user.id, + True, + progress_tracker=None, + use_staging_tree=True, + ) + set_active_content_database(self.tempdb) + + nodes = kolibri_models.ContentNode.objects.all() + self.assertEqual( + nodes.filter(title=self.incomplete_video_in_staging).count(), 0 + ) + self.assertEqual(nodes.filter(title=self.complete_video_in_staging).count(), 1) + self.assertEqual(nodes.filter(title=self.incomplete_video_in_main).count(), 0) + self.assertEqual(nodes.filter(title=self.complete_video_in_main).count(), 0) + + cleanup_content_database_connection(self.tempdb) + set_active_content_database(None) + if os.path.exists(self.tempdb): + os.remove(self.tempdb) diff --git a/contentcuration/contentcuration/tests/test_files.py b/contentcuration/contentcuration/tests/test_files.py index 215743e93d..125dcbc48b 100755 --- 
a/contentcuration/contentcuration/tests/test_files.py +++ b/contentcuration/contentcuration/tests/test_files.py @@ -1,28 +1,29 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import - import json -from builtins import str +from uuid import uuid4 +import mock import pytest +from django.core.exceptions import PermissionDenied from django.core.files.storage import default_storage -from django.core.files.uploadedfile import SimpleUploadedFile -from django.urls import reverse_lazy +from django.db.models import Exists +from django.db.models import OuterRef from le_utils.constants import content_kinds -from le_utils.constants import format_presets from mock import patch -from past.builtins import basestring from .base import BaseAPITestCase from .base import StudioTestCase from .testdata import base64encoding from .testdata import generated_base64encoding -from .testdata import srt_subtitle +from .testdata import node from contentcuration.api import write_raw_content_to_storage +from contentcuration.models import Channel from contentcuration.models import ContentNode from contentcuration.models import delete_empty_file_reference from contentcuration.models import File from contentcuration.models import generate_object_storage_name +from contentcuration.models import StagedFile +from contentcuration.models import User from contentcuration.utils.files import create_thumbnail_from_base64 from contentcuration.utils.files import get_thumbnail_encoding from contentcuration.utils.nodes import map_files_to_node @@ -69,7 +70,7 @@ def test_internal_thumbnail(self): } ] map_files_to_node(self.user, node, file_data) - self.assertTrue(isinstance(node.thumbnail_encoding, basestring)) + self.assertTrue(isinstance(node.thumbnail_encoding, str)) thumbnail_data = json.loads(node.thumbnail_encoding) self.assertEqual(thumbnail_data["base64"], generated_base64encoding()) @@ -90,3 +91,144 @@ def test_delete_empty_file_reference(self): assert default_storage.exists(storage_path), "file should be saved" delete_empty_file_reference(checksum, "pdf") assert not default_storage.exists(storage_path), "file should be deleted" + + +class StagedChannelSpaceTestCase(StudioTestCase): + """ + Tests for + - User.check_channel_space() + - User.get_available_staged_space() + - User.check_staged_space() + """ + + def setUp(self): + super().setUpBase() + + self.staged_channel = Channel.objects.create( + name="Staged", actor_id=self.user.id, language_id="en" + ) + self.staged_channel.save() + + file_node_id = uuid4().hex + self.staged_channel.staging_tree = node( + { + "node_id": uuid4().hex, + "kind_id": "topic", + "title": "Root Node", + "children": [ + { + "node_id": file_node_id, + "kind_id": "video", + "title": "Video 1", + } + ], + }, + parent=None, + ) + self.staged_channel.save() + self.node = ContentNode.objects.get(node_id=file_node_id) + self._set_uploader(self.channel) + self._set_uploader( + self.staged_channel, self.staged_channel.staging_tree.tree_id + ) + self.node_file = self.node.files.all()[0] + + def _set_uploader(self, channel: Channel, tree_id=None): + if tree_id is None: + tree_id = channel.main_tree.tree_id + + File.objects.filter( + Exists( + ContentNode.objects.filter( + tree_id=tree_id, id=OuterRef("contentnode_id") + ) + ) + ).update(uploaded_by=self.user) + + def _create_duplicate(self, file: File): + dupe_node = node( + { + "node_id": uuid4().hex, + "kind_id": "video", + "title": "Video 2", + }, + parent=self.node.parent, + ) + dupe_file = dupe_node.files.all()[0] + dupe_file.file_size = 
file.file_size + dupe_file.checksum = file.checksum + dupe_file.uploaded_by = self.user + dupe_file.save(set_by_file_on_disk=False) + + def test_check_channel_space__okay(self): + try: + self.user.check_channel_space(self.staged_channel) + except PermissionDenied: + self.fail("Staging channel space is larger than available") + + def test_check_channel_space__duplicate_checksum_same_tree(self): + # set file to slightly more than half, such that if both files are included, it should + # exceed the available space + self.node_file.file_size = self.user.disk_space / 2 + 1 + self.node_file.checksum = uuid4().hex + self.node_file.save(set_by_file_on_disk=False) + self._create_duplicate(self.node_file) + + try: + self.user.check_channel_space(self.staged_channel) + except PermissionDenied: + self.fail("Staging channel space is larger than available") + + def test_check_channel_space__duplicate_checksum_different_tree(self): + # set file larger than space + self.node_file.file_size = self.user.disk_space + 1 + self.node_file.save(set_by_file_on_disk=False) + + # ensure file has matching checksum to another file in deployed channel tree, + # which should be the case because of how the test fixtures function + deployed_file_count = File.objects.filter( + Exists( + ContentNode.objects.filter( + tree_id=self.channel.main_tree.tree_id, + id=OuterRef("contentnode_id"), + ) + ), + checksum=self.node_file.checksum, + ).count() + self.assertGreaterEqual(deployed_file_count, 1) + + try: + self.user.check_channel_space(self.staged_channel) + except PermissionDenied: + self.fail("Staging channel space is larger than available") + + def test_check_channel_space__fail(self): + self.node_file.file_size = self.user.disk_space + 1 + self.node_file.checksum = uuid4().hex + self.node_file.save(set_by_file_on_disk=False) + + with self.assertRaises(PermissionDenied): + self.user.check_channel_space(self.staged_channel) + + def test_get_available_staged_space(self): + f = StagedFile.objects.create( + checksum=uuid4().hex, + uploaded_by=self.user, + file_size=100, + ) + expected_available_space = self.user.disk_space - f.file_size + self.assertEqual( + expected_available_space, self.user.get_available_staged_space() + ) + + def test_check_staged_space__exists(self): + f = StagedFile.objects.create( + checksum=uuid4().hex, + uploaded_by=self.user, + file_size=100, + ) + with mock.patch.object( + User, "get_available_staged_space" + ) as get_available_staged_space: + get_available_staged_space.return_value = 0 + self.assertTrue(self.user.check_staged_space(100, f.checksum)) diff --git a/contentcuration/contentcuration/tests/test_format_preset_model.py b/contentcuration/contentcuration/tests/test_format_preset_model.py index 93287a9d2c..465070fa02 100644 --- a/contentcuration/contentcuration/tests/test_format_preset_model.py +++ b/contentcuration/contentcuration/tests/test_format_preset_model.py @@ -1,13 +1,8 @@ -from __future__ import absolute_import - -import types - from .base import StudioTestCase from contentcuration.models import FormatPreset class GetPresetTestCase(StudioTestCase): - def test_accepts_string(self): """ Check that if we pass in a string, we won't error out. @@ -33,7 +28,6 @@ def test_returns_none_if_called_with_nonexistent_preset(self): class GuessFormatPresetTestCase(StudioTestCase): - def test_accepts_string(self): """ Make sure we don't raise an error if we pass a string. 
diff --git a/contentcuration/contentcuration/tests/test_forms.py b/contentcuration/contentcuration/tests/test_forms.py index e004edf75b..7b2e7f1157 100644 --- a/contentcuration/contentcuration/tests/test_forms.py +++ b/contentcuration/contentcuration/tests/test_forms.py @@ -9,16 +9,11 @@ class ForgotPasswordFormTest(StudioAPITestCase): def setUp(self): self.request = mock.Mock() - self.data = dict( - email="tester@tester.com" - ) + self.data = dict(email="tester@tester.com") self.form = ForgotPasswordForm(data=self.data) self.form.full_clean() self.form.get_activation_key = mock.Mock() - self.extra_email_context = dict( - site="LE", - domain="test.learningequality.org" - ) + self.extra_email_context = dict(site="LE", domain="test.learningequality.org") @mock.patch("contentcuration.forms.PasswordResetForm.save") def test_save__active(self, parent_save): @@ -26,12 +21,12 @@ def test_save__active(self, parent_save): self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) parent_save.assert_called_once_with( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) @mock.patch("contentcuration.forms.render_to_string") @@ -42,64 +37,58 @@ def test_save__inactive(self, email_user, render_to_string): user.save() self.form.get_activation_key.return_value = "activation key" - render_to_string.side_effect = [ - "Subject", - "Message" - ] + render_to_string.side_effect = ["Subject", "Message"] self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) context = { - 'activation_key': "activation key", - 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS, - 'site': "LE", - 'user': user, - 'domain': "test.learningequality.org", + "activation_key": "activation key", + "expiration_days": settings.ACCOUNT_ACTIVATION_DAYS, + "site": "LE", + "user": user, + "domain": "test.learningequality.org", } render_to_string.assert_any_call( - 'registration/password_reset_subject.txt', - context + "registration/password_reset_subject.txt", context ) render_to_string.assert_any_call( - 'registration/activation_needed_email.txt', - context + "registration/activation_needed_email.txt", context + ) + email_user.assert_called_once_with( + "Subject", "Message", settings.DEFAULT_FROM_EMAIL ) - email_user.assert_called_once_with("Subject", "Message", settings.DEFAULT_FROM_EMAIL) @mock.patch("contentcuration.forms.render_to_string") @mock.patch("contentcuration.forms.User.email_user") def test_save__inactive__no_password(self, email_user, render_to_string): user = testdata.user("tester@tester.com") user.is_active = False - user.password = '' + user.password = "" user.save() - render_to_string.side_effect = [ - "Subject", - "Message" - ] + render_to_string.side_effect = ["Subject", "Message"] self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) self.form.get_activation_key.assert_not_called() context = { - 'site': "LE", - 'user': user, - 'domain': "test.learningequality.org", + "site": "LE", + "user": user, + "domain": "test.learningequality.org", } render_to_string.assert_any_call( - 'registration/password_reset_subject.txt', - context + "registration/password_reset_subject.txt", context ) render_to_string.assert_any_call( - 
'registration/registration_needed_email.txt', - context + "registration/registration_needed_email.txt", context + ) + email_user.assert_called_once_with( + "Subject", "Message", settings.DEFAULT_FROM_EMAIL ) - email_user.assert_called_once_with("Subject", "Message", settings.DEFAULT_FROM_EMAIL) @mock.patch("contentcuration.forms.render_to_string") @mock.patch("contentcuration.forms.User.email_user") @@ -108,7 +97,7 @@ def test_save__missing(self, parent_save, email_user, render_to_string): self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) parent_save.assert_not_called() self.form.get_activation_key.assert_not_called() diff --git a/contentcuration/contentcuration/tests/test_gcs_storage.py b/contentcuration/contentcuration/tests/test_gcs_storage.py index 165877f9ac..a58420873e 100755 --- a/contentcuration/contentcuration/tests/test_gcs_storage.py +++ b/contentcuration/contentcuration/tests/test_gcs_storage.py @@ -21,7 +21,9 @@ def setUp(self): self.blob_class = mock.create_autospec(Blob) self.blob_obj = self.blob_class("blob", "blob") self.mock_client = mock.create_autospec(Client) - self.storage = GoogleCloudStorage(client=self.mock_client(), bucket_name="bucket") + self.storage = GoogleCloudStorage( + client=self.mock_client(), bucket_name="bucket" + ) self.content = BytesIO(b"content") def test_calls_upload_from_file(self): @@ -41,7 +43,9 @@ def test_calls_upload_from_file_with_a_file_object_and_content_type(self): self.storage.save("myfile.jpg", self.content, blob_object=self.blob_obj) # Check that we pass self.content file_object to upload_from_file - self.blob_obj.upload_from_file.assert_called_once_with(self.content, content_type="image/jpeg") + self.blob_obj.upload_from_file.assert_called_once_with( + self.content, content_type="image/jpeg" + ) def test_checks_does_not_upload_file_if_empty(self): """ @@ -71,7 +75,10 @@ def test_uploads_cache_control_private_if_content_database(self): assert "private" in self.blob_obj.cache_control @mock.patch("contentcuration.utils.gcs_storage.BytesIO") - @mock.patch("contentcuration.utils.gcs_storage.GoogleCloudStorage._is_file_empty", return_value=False) + @mock.patch( + "contentcuration.utils.gcs_storage.GoogleCloudStorage._is_file_empty", + return_value=False, + ) def test_gzip_if_content_database(self, bytesio_mock, file_empty_mock): """ Check that if we're uploading a gzipped content database and @@ -92,6 +99,7 @@ class RandomFileSchema: """ A schema for a file we're about to upload. 
""" + contents = str filename = str @@ -99,7 +107,9 @@ def setUp(self): self.blob_class = mock.create_autospec(Blob) self.blob_obj = self.blob_class("blob", "blob") self.mock_client = mock.create_autospec(Client) - self.storage = GoogleCloudStorage(client=self.mock_client(), bucket_name="bucket") + self.storage = GoogleCloudStorage( + client=self.mock_client(), bucket_name="bucket" + ) self.local_file = mixer.blend(self.RandomFileSchema) def test_raises_error_if_mode_is_not_rb(self): @@ -147,8 +157,13 @@ def setUp(self): self.mock_anon_bucket = bucket_cls(self.mock_anon_client, "bucket") self.mock_anon_client.get_bucket.return_value = self.mock_anon_bucket - with mock.patch("contentcuration.utils.gcs_storage._create_default_client", return_value=self.mock_default_client), \ - mock.patch("contentcuration.utils.gcs_storage.Client.create_anonymous_client", return_value=self.mock_anon_client): + with mock.patch( + "contentcuration.utils.gcs_storage._create_default_client", + return_value=self.mock_default_client, + ), mock.patch( + "contentcuration.utils.gcs_storage.Client.create_anonymous_client", + return_value=self.mock_anon_client, + ): self.storage = CompositeGCS() def test_get_writeable_backend(self): @@ -207,8 +222,13 @@ def test_url(self): mock_blob = self.blob_cls("blob", "blob") self.mock_default_bucket.get_blob.return_value = mock_blob mock_blob.public_url = "https://storage.googleapis.com/bucket/blob" - self.assertEqual(self.storage.url("blob"), "https://storage.googleapis.com/bucket/blob") + self.assertEqual( + self.storage.url("blob"), "https://storage.googleapis.com/bucket/blob" + ) def test_get_created_time(self): self.mock_default_bucket.get_blob.return_value = self.blob_cls("blob", "blob") - self.assertEqual(self.storage.get_created_time("blob"), self.blob_cls.return_value.time_created) + self.assertEqual( + self.storage.get_created_time("blob"), + self.blob_cls.return_value.time_created, + ) diff --git a/contentcuration/contentcuration/tests/test_models.py b/contentcuration/contentcuration/tests/test_models.py index 9c093369cd..2fb728e4a3 100644 --- a/contentcuration/contentcuration/tests/test_models.py +++ b/contentcuration/contentcuration/tests/test_models.py @@ -1,5 +1,4 @@ import uuid -from uuid import uuid4 import mock import pytest @@ -43,25 +42,25 @@ def object_storage_name_tests(): "no_extension", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "vtt", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.vtt" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.vtt", # expected ), ( "no_extension", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4", # expected ), ( "has_extension.txt", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "vtt", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt", # expected ), ( "has_extension.txt", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt", # expected ), ] @@ -72,22 +71,26 @@ def test_object_storage_name(object_storage_name_tests): actual_name = object_storage_name(test_file, filename) - assert actual_name == expected_name, \ - "Storage names don't match: Expected: '{}' Actual '{}'".format(expected_name, - actual_name) + assert ( + 
actual_name == expected_name + ), "Storage names don't match: Expected: '{}' Actual '{}'".format( + expected_name, actual_name + ) def test_generate_object_storage_name(object_storage_name_tests): for filename, checksum, file_format_id, expected_name in object_storage_name_tests: - default_ext = '' + default_ext = "" if file_format_id: - default_ext = '.{}'.format(file_format_id) + default_ext = ".{}".format(file_format_id) actual_name = generate_object_storage_name(checksum, filename, default_ext) - assert actual_name == expected_name, \ - "Storage names don't match: Expected: '{}' Actual '{}'".format(expected_name, - actual_name) + assert ( + actual_name == expected_name + ), "Storage names don't match: Expected: '{}' Actual '{}'".format( + expected_name, actual_name + ) def create_contentnode(parent_id): @@ -102,21 +105,15 @@ def create_contentnode(parent_id): def create_assessment_item(parent_id): - return AssessmentItem.objects.create( - contentnode=create_contentnode(parent_id) - ) + return AssessmentItem.objects.create(contentnode=create_contentnode(parent_id)) def create_assessment_item_file(parent_id): - return File.objects.create( - assessment_item=create_assessment_item(parent_id) - ) + return File.objects.create(assessment_item=create_assessment_item(parent_id)) def create_file(parent_id): - return File.objects.create( - contentnode=create_contentnode(parent_id) - ) + return File.objects.create(contentnode=create_contentnode(parent_id)) class PermissionQuerysetTestCase(StudioTestCase): @@ -140,12 +137,18 @@ def forbidden_user(self): return user def assertQuerysetContains(self, queryset, **filters): - self.assertGreater(queryset.filter(**filters).count(), 0, - "Queryset does not contain objects for: {}".format(filters)) + self.assertGreater( + queryset.filter(**filters).count(), + 0, + "Queryset does not contain objects for: {}".format(filters), + ) def assertQuerysetDoesNotContain(self, queryset, **filters): - self.assertEqual(queryset.filter(**filters).count(), 0, - "Queryset contains objects for: {}".format(filters)) + self.assertEqual( + queryset.filter(**filters).count(), + 0, + "Queryset contains objects for: {}".format(filters), + ) class ChannelTestCase(PermissionQuerysetTestCase): @@ -156,7 +159,9 @@ def base_queryset(self): def test_filter_view_queryset__public_channel(self): channel = self.public_channel - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetContains(queryset, pk=channel.id) user = testdata.user() @@ -169,7 +174,9 @@ def test_filter_view_queryset__public_channel__deleted(self): channel.deleted = True channel.save(actor_id=self.admin_user.id) - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -180,13 +187,17 @@ def test_filter_view_queryset__public_channel__deleted(self): def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetContains(queryset, pk=channel.id) def test_filter_view_queryset__private_channel(self): channel = testdata.channel() - queryset = 
Channel.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -208,13 +219,17 @@ def test_filter_view_queryset__private_channel__pending_editor(self): def test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) def test_filter_edit_queryset__public_channel(self): channel = self.public_channel - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -229,13 +244,17 @@ def test_filter_edit_queryset__public_channel(self): def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -250,7 +269,9 @@ def test_filter_edit_queryset__private_channel(self): def test_filter_edit_queryset__private_channel__anonymous(self): channel = testdata.channel() - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) def test_get_server_rev(self): @@ -268,11 +289,13 @@ def create_change(server_rev, applied): kwargs={}, ) - Change.objects.bulk_create([ - create_change(1, True), - create_change(2, True), - create_change(3, False), - ]) + Change.objects.bulk_create( + [ + create_change(1, True), + create_change(2, True), + create_change(3, False), + ] + ) self.assertEqual(channel.get_server_rev(), 2) @@ -286,7 +309,9 @@ def test_filter_view_queryset__public_channel(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetContains(queryset, pk=contentnode.id) @@ -300,7 +325,9 @@ def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetContains(queryset, pk=contentnode.id) @@ 
-308,7 +335,9 @@ def test_filter_view_queryset__private_channel(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -322,7 +351,9 @@ def test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -337,7 +368,9 @@ def test_filter_view_queryset__orphan_tree(self): def test_filter_view_queryset__orphan_tree__anonymous(self): contentnode = create_contentnode(settings.ORPHANAGE_ROOT_ID) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -345,7 +378,9 @@ def test_filter_edit_queryset__public_channel(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -364,7 +399,9 @@ def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -372,7 +409,9 @@ def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -391,7 +430,9 @@ def test_filter_edit_queryset__private_channel__anonymous(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -406,7 +447,9 @@ def test_filter_edit_queryset__orphan_tree(self): def 
test_filter_edit_queryset__orphan_tree__anonymous(self): contentnode = create_contentnode(settings.ORPHANAGE_ROOT_ID) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -438,7 +481,9 @@ def test_filter_by_pk__sets_cache(self): with self.settings(IS_CONTENTNODE_TABLE_PARTITIONED=True): node = ContentNode.filter_by_pk(pk=contentnode.id).first() - tree_id_from_cache = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=contentnode.id)) + tree_id_from_cache = cache.get( + CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=contentnode.id) + ) self.assertEqual(node.tree_id, tree_id_from_cache) def test_filter_by_pk__doesnot_query_db_when_cache_hit(self): @@ -467,9 +512,13 @@ def test_filter_by_pk__tree_id_updated_on_move(self): sourcenode.move_to(targetnode, "last-child") after_move_sourcenode = ContentNode.filter_by_pk(sourcenode.id).first() - tree_id_from_cache = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=sourcenode.id)) + tree_id_from_cache = cache.get( + CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=sourcenode.id) + ) - self.assertEqual(after_move_sourcenode.tree_id, testchannel.trash_tree.tree_id) + self.assertEqual( + after_move_sourcenode.tree_id, testchannel.trash_tree.tree_id + ) self.assertEqual(tree_id_from_cache, testchannel.trash_tree.tree_id) def test_make_content_id_unique(self): @@ -507,7 +556,9 @@ def test_filter_view_queryset__public_channel(self): channel = self.public_channel assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = AssessmentItem.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetContains(queryset, pk=assessment_item.id) user = testdata.user() @@ -519,14 +570,18 @@ def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = AssessmentItem.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetContains(queryset, pk=assessment_item.id) def test_filter_view_queryset__private_channel(self): channel = testdata.channel() assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = AssessmentItem.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id) user = testdata.user() @@ -538,14 +593,18 @@ def test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = AssessmentItem.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id) def test_filter_edit_queryset__public_channel(self): channel = self.public_channel assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, 
user=self.forbidden_user) + queryset = AssessmentItem.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id) user = testdata.user() @@ -561,14 +620,18 @@ def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = AssessmentItem.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id) def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = AssessmentItem.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id) user = testdata.user() @@ -584,7 +647,9 @@ def test_filter_edit_queryset__private_channel__anonymous(self): channel = testdata.channel() assessment_item = create_assessment_item(channel.main_tree_id) - queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = AssessmentItem.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id) @@ -597,7 +662,9 @@ def test_filter_view_queryset__public_channel(self): channel = self.public_channel node_file = create_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetContains(queryset, pk=node_file.id) user = testdata.user() @@ -609,14 +676,18 @@ def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel node_file = create_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetContains(queryset, pk=node_file.id) def test_filter_view_queryset__private_channel(self): channel = testdata.channel() node_file = create_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) user = testdata.user() @@ -628,14 +699,18 @@ def test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() node_file = create_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) def test_filter_view_queryset__uploaded_by(self): user = testdata.user() node_file = File.objects.create(uploaded_by=user) - queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) queryset = File.filter_view_queryset(self.base_queryset, user=user) @@ -645,7 
+720,9 @@ def test_filter_edit_queryset__public_channel(self): channel = self.public_channel node_file = create_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) user = testdata.user() @@ -661,14 +738,18 @@ def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel node_file = create_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() node_file = create_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) user = testdata.user() @@ -684,14 +765,18 @@ def test_filter_edit_queryset__private_channel__anonymous(self): channel = testdata.channel() node_file = create_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) def test_filter_edit_queryset__uploaded_by(self): user = testdata.user() node_file = File.objects.create(uploaded_by=user) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=node_file.id) queryset = File.filter_edit_queryset(self.base_queryset, user=user) @@ -734,7 +819,7 @@ def test_invalid_file_format(self): File.objects.create( contentnode=create_contentnode(channel.main_tree_id), preset_id=format_presets.EPUB, - file_format_id='pptx', + file_format_id="pptx", ) @@ -747,7 +832,9 @@ def test_filter_view_queryset__public_channel(self): channel = self.public_channel assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetContains(queryset, pk=assessment_file.id) user = testdata.user() @@ -759,14 +846,18 @@ def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetContains(queryset, pk=assessment_file.id) def test_filter_view_queryset__private_channel(self): channel = testdata.channel() assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id) user = testdata.user() @@ -778,14 +869,18 @@ def 
test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id) def test_filter_edit_queryset__public_channel(self): channel = self.public_channel assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id) user = testdata.user() @@ -801,14 +896,18 @@ def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id) def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id) user = testdata.user() @@ -824,12 +923,14 @@ def test_filter_edit_queryset__private_channel__anonymous(self): channel = testdata.channel() assessment_file = create_assessment_item_file(channel.main_tree_id) - queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = File.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id) class UserTestCase(StudioTestCase): - def _create_user(self, email, password='password', is_active=True): + def _create_user(self, email, password="password", is_active=True): user = User.objects.create(email=email) user.set_password(password) user.is_active = is_active @@ -841,15 +942,20 @@ def _setup_user_related_data(self): user_b = self._create_user("b@tester.com") # Create a sole editor non-public channel. - sole_editor_channel = Channel.objects.create(name="sole-editor", actor_id=user_a.id) + sole_editor_channel = Channel.objects.create( + name="sole-editor", actor_id=user_a.id + ) sole_editor_channel.editors.add(user_a) # Create sole-editor channel nodes. for i in range(0, 3): - testdata.node({ - "title": "sole-editor-channel-node", - "kind_id": "video", - }, parent=sole_editor_channel.main_tree) + testdata.node( + { + "title": "sole-editor-channel-node", + "kind_id": "video", + }, + parent=sole_editor_channel.main_tree, + ) # Create a sole editor public channel. public_channel = testdata.channel("public") @@ -918,7 +1024,9 @@ def test_delete(self): # Sets is_active to False? self.assertEqual(user.is_active, False) # Creates user history? 
- user_delete_history = UserHistory.objects.filter(user_id=user.id, action=user_history.DELETION).first() + user_delete_history = UserHistory.objects.filter( + user_id=user.id, action=user_history.DELETION + ).first() self.assertIsNotNone(user_delete_history) def test_recover(self): @@ -931,7 +1039,9 @@ def test_recover(self): # Keeps is_active set to False? self.assertEqual(user.is_active, False) # Creates user history? - user_recover_history = UserHistory.objects.filter(user_id=user.id, action=user_history.RECOVERY).first() + user_recover_history = UserHistory.objects.filter( + user_id=user.id, action=user_history.RECOVERY + ).first() self.assertIsNotNone(user_recover_history) def test_hard_delete_user_related_data(self): @@ -946,7 +1056,11 @@ def test_hard_delete_user_related_data(self): self.assertTrue(Channel.objects.filter(name="public").exists()) # Deletes all user-related invitations. - self.assertFalse(Invitation.objects.filter(Q(sender_id=user.id) | Q(invited_id=user.id)).exists()) + self.assertFalse( + Invitation.objects.filter( + Q(sender_id=user.id) | Q(invited_id=user.id) + ).exists() + ) # Deletes sole-editor channelsets. self.assertFalse(ChannelSet.objects.filter(name="sole-editor").exists()) @@ -956,12 +1070,42 @@ def test_hard_delete_user_related_data(self): self.assertTrue(ChannelSet.objects.filter(name="public").exists()) # All content nodes of the sole-editor channel point to the ORPHANAGE ROOT NODE? - self.assertFalse(ContentNode.objects.filter(~Q(parent_id=settings.ORPHANAGE_ROOT_ID) - & Q(title="sole-editor-channel-node")).exists()) + self.assertFalse( + ContentNode.objects.filter( + ~Q(parent_id=settings.ORPHANAGE_ROOT_ID) + & Q(title="sole-editor-channel-node") + ).exists() + ) # Creates user history? - user_hard_delete_history = UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).first() + user_hard_delete_history = UserHistory.objects.filter( + user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION + ).first() self.assertIsNotNone(user_hard_delete_history) + def test_get_server_rev(self): + user = testdata.user() + + def create_change(server_rev, applied): + return Change( + user=user, + server_rev=server_rev, + created_by=user, + change_type=DELETED, + table=User.__name__, + applied=applied, + kwargs={}, + ) + + Change.objects.bulk_create( + [ + create_change(1, True), + create_change(2, True), + create_change(3, False), + ] + ) + + self.assertEqual(user.get_server_rev(), 2) + class ChannelHistoryTestCase(StudioTestCase): def setUp(self): @@ -969,29 +1113,40 @@ def setUp(self): self.channel = testdata.channel() def test_mark_channel_created(self): - self.assertEqual(1, self.channel.history.filter(action=channel_history.CREATION).count()) + self.assertEqual( + 1, self.channel.history.filter(action=channel_history.CREATION).count() + ) def test_mark_channel_deleted(self): self.assertEqual(0, self.channel.deletion_history.count()) self.channel.deleted = True self.channel.save(actor_id=self.admin_user.id) - self.assertEqual(1, self.channel.deletion_history.filter(actor=self.admin_user).count()) + self.assertEqual( + 1, self.channel.deletion_history.filter(actor=self.admin_user).count() + ) def test_mark_channel_recovered(self): - self.assertEqual(0, self.channel.history.filter(actor=self.admin_user, action=channel_history.RECOVERY).count()) + self.assertEqual( + 0, + self.channel.history.filter( + actor=self.admin_user, action=channel_history.RECOVERY + ).count(), + ) self.channel.deleted = True
self.channel.save(actor_id=self.admin_user.id) self.channel.deleted = False self.channel.save(actor_id=self.admin_user.id) - self.assertEqual(1, self.channel.history.filter(actor=self.admin_user, action=channel_history.RECOVERY).count()) + self.assertEqual( + 1, + self.channel.history.filter( + actor=self.admin_user, action=channel_history.RECOVERY + ).count(), + ) def test_prune(self): i = 10 now = timezone.now() - channels = [ - self.channel, - testdata.channel() - ] + channels = [self.channel, testdata.channel()] last_history_ids = [] ChannelHistory.objects.all().delete() @@ -1012,11 +1167,12 @@ def test_prune(self): self.assertEqual(20, ChannelHistory.objects.count()) ChannelHistory.prune() self.assertEqual(2, ChannelHistory.objects.count()) - self.assertEqual(2, ChannelHistory.objects.filter(id__in=last_history_ids).count()) + self.assertEqual( + 2, ChannelHistory.objects.filter(id__in=last_history_ids).count() + ) class FeedbackModelTests(StudioTestCase): - @classmethod def setUpClass(cls): super(FeedbackModelTests, cls).setUpClass() @@ -1027,25 +1183,34 @@ def setUp(self): def _create_base_feedback_data(self, context, contentnode_id, content_id): base_feedback_data = { - 'context': context, - 'contentnode_id': contentnode_id, - 'content_id': content_id, + "context": context, + "contentnode_id": contentnode_id, + "content_id": content_id, } return base_feedback_data def _create_recommendation_event(self): channel = testdata.channel() - node_where_import_was_initiated = testdata.node({"kind_id": content_kinds.TOPIC, "title": "recomendations provided here"}) + node_where_import_was_initiated = testdata.node( + {"kind_id": content_kinds.TOPIC, "title": "recommendations provided here"} + ) base_feedback_data = self._create_base_feedback_data( - {'model_version': 1, 'breadcrums': "#Title#->Random"}, + {"model_version": 1, "breadcrums": "#Title#->Random"}, node_where_import_was_initiated.id, - node_where_import_was_initiated.content_id + node_where_import_was_initiated.content_id, ) recommendations_event = RecommendationsEvent.objects.create( user=self.user, target_channel_id=channel.id, time_hidden=timezone.now(), - content=[{'content_id': str(uuid4()), 'node_id': str(uuid4()), 'channel_id': str(uuid4()), 'score': 4}], + content=[ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], **base_feedback_data ) @@ -1053,52 +1218,67 @@ def _create_recommendation_event(self): def test_create_flag_feedback_event(self): channel = testdata.channel("testchannel") - flagged_node = testdata.node({"kind_id": content_kinds.TOPIC, "title": "SuS ContentNode"}) + flagged_node = testdata.node( + {"kind_id": content_kinds.TOPIC, "title": "SuS ContentNode"} + ) base_feedback_data = self._create_base_feedback_data( - {'spam': 'Spam or misleading'}, - flagged_node.id, - flagged_node.content_id + {"spam": "Spam or misleading"}, flagged_node.id, flagged_node.content_id ) flag_feedback_event = FlagFeedbackEvent.objects.create( - user=self.user, - target_channel_id=channel.id, - **base_feedback_data + user=self.user, target_channel_id=channel.id, **base_feedback_data ) self.assertEqual(flag_feedback_event.user, self.user) - self.assertEqual(flag_feedback_event.context['spam'], 'Spam or misleading') + self.assertEqual(flag_feedback_event.context["spam"], "Spam or misleading") def test_create_recommendations_interaction_event(self): # This represents a node that was recommended by the model and was interacted with by the user!
- recommended_node = testdata.node({"kind_id": content_kinds.TOPIC, "title": "This node was recommended by the model"}) + recommended_node = testdata.node( + { + "kind_id": content_kinds.TOPIC, + "title": "This node was recommended by the model", + } + ) base_feedback_data = self._create_base_feedback_data( {"comment": "explicit reason given by user why he rejected this node!"}, recommended_node.id, - recommended_node.content_id - ) + recommended_node.content_id, + ) fk = self._create_recommendation_event().id rec_interaction_event = RecommendationsInteractionEvent.objects.create( - feedback_type='rejected', - feedback_reason='some predefined reasons like (not related)', + feedback_type="rejected", + feedback_reason="some predefined reasons like (not related)", recommendation_event_id=fk, **base_feedback_data ) - self.assertEqual(rec_interaction_event.feedback_type, 'rejected') - self.assertEqual(rec_interaction_event.feedback_reason, 'some predefined reasons like (not related)') + self.assertEqual(rec_interaction_event.feedback_type, "rejected") + self.assertEqual( + rec_interaction_event.feedback_reason, + "some predefined reasons like (not related)", + ) def test_create_recommendations_event(self): channel = testdata.channel() - node_where_import_was_initiated = testdata.node({"kind_id": content_kinds.TOPIC, "title": "recomendations provided here"}) + node_where_import_was_initiated = testdata.node( + {"kind_id": content_kinds.TOPIC, "title": "recommendations provided here"} + ) base_feedback_data = self._create_base_feedback_data( - {'model_version': 1, 'breadcrums': "#Title#->Random"}, + {"model_version": 1, "breadcrums": "#Title#->Random"}, node_where_import_was_initiated.id, - node_where_import_was_initiated.content_id + node_where_import_was_initiated.content_id, ) recommendations_event = RecommendationsEvent.objects.create( user=self.user, target_channel_id=channel.id, time_hidden=timezone.now(), - content=[{'content_id': str(uuid4()), 'node_id': str(uuid4()), 'channel_id': str(uuid4()), 'score': 4}], + content=[ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], **base_feedback_data ) self.assertEqual(len(recommendations_event.content), 1) - self.assertEqual(recommendations_event.content[0]['score'], 4) + self.assertEqual(recommendations_event.content[0]["score"], 4) diff --git
a/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py b/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py index 96382e25af..745ba4a5d2 100644 --- a/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py +++ b/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py @@ -15,7 +15,6 @@ class TestRectifyMigrationCommand(StudioAPITestCase): - @classmethod def setUpClass(cls): super(TestRectifyMigrationCommand, cls).setUpClass() @@ -36,7 +35,7 @@ def setUp(self): license_description=self.license_description_original, original_channel_id=None, source_channel_id=None, - author="old author" + author="old author", ) self.user = testdata.user() self.original_channel.editors.add(self.user) @@ -82,15 +81,21 @@ def create_source_channel_and_contentnode(self): return source_node, source_channel def run_migrations(self): - call_command('rectify_incorrect_contentnode_source_fields') + call_command("rectify_incorrect_contentnode_source_fields") def test_two_node_case(self): - base_node, base_channel = self.create_base_channel_and_contentnode(self.original_contentnode, self.original_channel) + base_node, base_channel = self.create_base_channel_and_contentnode( + self.original_contentnode, self.original_channel + ) publish_channel(self.user.id, Channel.objects.get(pk=base_channel.pk).id) # main_tree node still has changed=true even after the publish - for node in Channel.objects.get(pk=base_channel.pk).main_tree.get_family().filter(changed=True): + for node in ( + Channel.objects.get(pk=base_channel.pk) + .main_tree.get_family() + .filter(changed=True) + ): node.changed = False # This should probably again change the changed=true but surprisingly it does not # Meaning the changed boolean does not change for the main_tree no matter what we do @@ -98,17 +103,28 @@ def test_two_node_case(self): node.save() ContentNode.objects.filter(pk=base_node.pk).update( - modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc) + modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc) ) self.run_migrations() updated_base_node = ContentNode.objects.get(pk=base_node.pk) - self.assertEqual(updated_base_node.license_description, self.original_contentnode.license_description) - self.assertEqual(Channel.objects.get(pk=base_channel.id).main_tree.get_family().filter(changed=True).exists(), True) + self.assertEqual( + updated_base_node.license_description, + self.original_contentnode.license_description, + ) + self.assertEqual( + Channel.objects.get(pk=base_channel.id) + .main_tree.get_family() + .filter(changed=True) + .exists(), + True, + ) def test_three_node_case_implicit(self): source_node, source_channel = self.create_source_channel_and_contentnode() - base_node, base_channel = self.create_base_channel_and_contentnode(source_node, source_channel) + base_node, base_channel = self.create_base_channel_and_contentnode( + source_node, source_channel + ) source_node.aggregator = "Nami" source_node.save() # Implicit case @@ -119,12 +135,16 @@ def test_three_node_case_implicit(self): publish_channel(self.user.id, Channel.objects.get(pk=base_channel.pk).id) - for node in Channel.objects.get(pk=base_channel.pk).main_tree.get_family().filter(changed=True): + for node in ( + Channel.objects.get(pk=base_channel.pk) + .main_tree.get_family() + .filter(changed=True) + ): node.changed = False node.save() ContentNode.objects.filter(pk=base_node.pk).update( - modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
+ modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc) ) ContentNode.objects.filter(pk=source_node.pk).update( @@ -134,25 +154,43 @@ def test_three_node_case_implicit(self): self.run_migrations() updated_base_node = ContentNode.objects.get(pk=base_node.pk) updated_source_node = ContentNode.objects.get(pk=source_node.pk) - self.assertEqual(updated_base_node.license_description, self.original_contentnode.license_description) - self.assertEqual(updated_source_node.license_description, self.original_contentnode.license_description) - self.assertEqual(Channel.objects.get(pk=base_channel.id).main_tree.get_family().filter(changed=True).exists(), True) + self.assertEqual( + updated_base_node.license_description, + self.original_contentnode.license_description, + ) + self.assertEqual( + updated_source_node.license_description, + self.original_contentnode.license_description, + ) + self.assertEqual( + Channel.objects.get(pk=base_channel.id) + .main_tree.get_family() + .filter(changed=True) + .exists(), + True, + ) def test_three_node_case_explicit(self): source_node, source_channel = self.create_source_channel_and_contentnode() - base_node, base_channel = self.create_base_channel_and_contentnode(source_node, source_channel) + base_node, base_channel = self.create_base_channel_and_contentnode( + source_node, source_channel + ) source_node.license_description = "luffy" base_node.license_description = "zoro" base_node.save() source_node.save() publish_channel(self.user.id, Channel.objects.get(pk=base_channel.pk).id) - for node in Channel.objects.get(pk=base_channel.pk).main_tree.get_family().filter(changed=True): + for node in ( + Channel.objects.get(pk=base_channel.pk) + .main_tree.get_family() + .filter(changed=True) + ): node.changed = False node.save() ContentNode.objects.filter(pk=base_node.pk).update( - modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc) + modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc) ) ContentNode.objects.filter(pk=source_node.pk).update( @@ -162,6 +200,18 @@ def test_three_node_case_explicit(self): self.run_migrations() updated_base_node = ContentNode.objects.get(pk=base_node.pk) updated_source_node = ContentNode.objects.get(pk=source_node.pk) - self.assertEqual(updated_base_node.license_description, self.original_contentnode.license_description) - self.assertEqual(updated_source_node.license_description, self.original_contentnode.license_description) - self.assertEqual(Channel.objects.get(pk=base_channel.id).main_tree.get_family().filter(changed=True).exists(), True) + self.assertEqual( + updated_base_node.license_description, + self.original_contentnode.license_description, + ) + self.assertEqual( + updated_source_node.license_description, + self.original_contentnode.license_description, + ) + self.assertEqual( + Channel.objects.get(pk=base_channel.id) + .main_tree.get_family() + .filter(changed=True) + .exists(), + True, + ) diff --git a/contentcuration/contentcuration/tests/test_rest_framework.py b/contentcuration/contentcuration/tests/test_rest_framework.py index 6cb0acfd6a..af319ec4e1 100644 --- a/contentcuration/contentcuration/tests/test_rest_framework.py +++ b/contentcuration/contentcuration/tests/test_rest_framework.py @@ -1,16 +1,7 @@ -from __future__ import absolute_import - -import json - import pytest from django.urls import reverse_lazy -from le_utils.constants import content_kinds -from le_utils.constants import exercises from .base import BaseAPITestCase -from contentcuration.models import AssessmentItem -from contentcuration.models 
import ContentNode -from contentcuration.models import File from contentcuration.models import User pytestmark = pytest.mark.django_db @@ -39,7 +30,7 @@ def test_unauthorized_get(self): def test_readonly_fields(self): original_version = self.channel.version url = reverse_lazy("channel-list") + "/" + self.channel.pk - response = self.put( + self.put( url, { "version": original_version + 1, @@ -49,109 +40,3 @@ def test_readonly_fields(self): ) self.channel.refresh_from_db() self.assertEqual(original_version, self.channel.version) - - -# TODO: rtibbles - update tests to test sync behaviour. -@pytest.mark.skip -class AssessmentItemTestCase(BaseAPITestCase): - def test_bulk_update(self): - exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first() - item1 = AssessmentItem.objects.create(contentnode=exercise) - item2 = AssessmentItem.objects.create(contentnode=exercise) - item3 = AssessmentItem.objects.create(contentnode=exercise) - item1dict = {} - item2dict = {} - item3dict = {} - for field in AssessmentItem._meta.fields: - attname = field.attname - set_attname = attname - if attname == "contentnode_id": - set_attname = "contentnode" - item1dict[set_attname] = getattr(item1, attname) - item2dict[set_attname] = getattr(item2, attname) - item3dict[set_attname] = getattr(item3, attname) - item1dict["question"] = "test" - item2dict["type"] = "test" - self.client.put( - reverse_lazy("assessmentitem-list"), - json.dumps([item1dict, item2dict, item3dict]), - content_type="application/json", - ) - item1.refresh_from_db() - self.assertEqual(item1.question, "test") - item2.refresh_from_db() - self.assertEqual(item2.type, "test") - item3.refresh_from_db() - self.assertEqual(item3.question, item3dict["question"]) - - def test_bulk_update_non_existent_item(self): - exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first() - item1 = AssessmentItem.objects.create(contentnode=exercise) - item1dict = {} - item2dict = {} - item3dict = {} - for field in AssessmentItem._meta.fields: - attname = field.attname - set_attname = attname - if attname == "contentnode_id": - set_attname = "contentnode" - item1dict[set_attname] = getattr(item1, attname) - item2dict[set_attname] = getattr(item1, attname) - item3dict[set_attname] = getattr(item1, attname) - item2dict["id"] = 10000 - item3dict["id"] = 10001 - item1dict["question"] = "test" - response = self.client.put( - reverse_lazy("assessmentitem-list"), - json.dumps([item1dict, item2dict, item3dict]), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400) - - def test_bulk_update_checksum_file_not_associated_create_new_file_object(self): - exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first() - item1 = AssessmentItem.objects.create(contentnode=exercise) - item1dict = {} - for field in AssessmentItem._meta.fields: - attname = field.attname - set_attname = attname - if attname == "contentnode_id": - set_attname = "contentnode" - item1dict[set_attname] = getattr(item1, attname) - checksum = "b6d83d66859b0cf095ef81120ef98e1f" - item1dict["question"] = ( - "![I'm an image!]($" - + exercises.IMG_PLACEHOLDER - + "/{checksum}.gif)".format(checksum=checksum) - ) - File.objects.create(checksum=checksum) - self.client.put( - reverse_lazy("assessmentitem-list"), - json.dumps([item1dict]), - content_type="application/json", - ) - self.assertEqual(File.objects.filter(checksum=checksum).count(), 2) - - def test_bulk_update_checksum_file_associated_use_existing_file_object(self): - exercise = 
ContentNode.objects.filter(kind=content_kinds.EXERCISE).first() - item1 = AssessmentItem.objects.create(contentnode=exercise) - item1dict = {} - for field in AssessmentItem._meta.fields: - attname = field.attname - set_attname = attname - if attname == "contentnode_id": - set_attname = "contentnode" - item1dict[set_attname] = getattr(item1, attname) - checksum = "b6d83d66859b0cf095ef81120ef98e1f" - item1dict["question"] = ( - "![I'm an image!]($" - + exercises.IMG_PLACEHOLDER - + "/{checksum}.gif)".format(checksum=checksum) - ) - File.objects.create(checksum=checksum, assessment_item=item1) - self.client.put( - reverse_lazy("assessmentitem-list"), - json.dumps([item1dict]), - content_type="application/json", - ) - self.assertEqual(File.objects.filter(checksum=checksum).count(), 1) diff --git a/contentcuration/contentcuration/tests/test_restore_channel.py b/contentcuration/contentcuration/tests/test_restore_channel.py index a4d1e13a39..6c5e1500ff 100644 --- a/contentcuration/contentcuration/tests/test_restore_channel.py +++ b/contentcuration/contentcuration/tests/test_restore_channel.py @@ -23,58 +23,61 @@ thumbnail_path = "/content/thumbnail.png" ASSESSMENT_DATA = { - 'input-question-test': { - 'template': 'perseus/input_question.json', - 'type': exercises.INPUT_QUESTION, - 'question': "Input question", - 'question_images': [{"name": "test.jpg", "width": 12.71, "height": 12.12}], - 'hints': [{'hint': 'Hint 1'}], - 'answers': [ - {'answer': '1', 'correct': True, 'images': []}, - {'answer': '2', 'correct': True, 'images': []} + "input-question-test": { + "template": "perseus/input_question.json", + "type": exercises.INPUT_QUESTION, + "question": "Input question", + "question_images": [{"name": "test.jpg", "width": 12.71, "height": 12.12}], + "hints": [{"hint": "Hint 1"}], + "answers": [ + {"answer": "1", "correct": True, "images": []}, + {"answer": "2", "correct": True, "images": []}, ], - 'order': 0 + "order": 0, }, - 'multiple-selection-test': { - 'template': 'perseus/multiple_selection.json', - 'type': exercises.MULTIPLE_SELECTION, - 'question': "Multiple selection question", - 'question_images': [], - 'hints': [], - 'answers': [ - {'answer': 'A', 'correct': True, 'images': []}, - {'answer': 'B', 'correct': True, 'images': []}, - {'answer': 'C', 'correct': False, 'images': []}, + "multiple-selection-test": { + "template": "perseus/multiple_selection.json", + "type": exercises.MULTIPLE_SELECTION, + "question": "Multiple selection question", + "question_images": [], + "hints": [], + "answers": [ + {"answer": "A", "correct": True, "images": []}, + {"answer": "B", "correct": True, "images": []}, + {"answer": "C", "correct": False, "images": []}, ], - 'multiple_select': True, - 'order': 1, - 'randomize': False + "multiple_select": True, + "order": 1, + "randomize": False, }, - 'single-selection-test': { - 'template': 'perseus/multiple_selection.json', - 'type': exercises.SINGLE_SELECTION, - 'question': "Single select question", - 'question_images': [], - 'hints': [{'hint': 'Hint test'}], - 'answers': [ - {'answer': 'Correct answer', 'correct': True, 'images': []}, - {'answer': 'Incorrect answer', 'correct': False, 'images': []}, + "single-selection-test": { + "template": "perseus/multiple_selection.json", + "type": exercises.SINGLE_SELECTION, + "question": "Single select question", + "question_images": [], + "hints": [{"hint": "Hint test"}], + "answers": [ + {"answer": "Correct answer", "correct": True, "images": []}, + {"answer": "Incorrect answer", "correct": False, "images": []}, ], - 
'multiple_select': False, - 'order': 2, - 'randomize': True + "multiple_select": False, + "order": 2, + "randomize": True, + }, + "perseus-question-test": { + "template": "perseus/perseus_question.json", + "type": exercises.PERSEUS_QUESTION, + "order": 3, + "raw_data": "{}", }, - 'perseus-question-test': { - 'template': 'perseus/perseus_question.json', - 'type': exercises.PERSEUS_QUESTION, - 'order': 3, - 'raw_data': '{}' - } } class ChannelRestoreUtilityFunctionTestCase(StudioTestCase): - @patch("contentcuration.utils.import_tools.write_to_thumbnail_file", return_value=thumbnail_path) + @patch( + "contentcuration.utils.import_tools.write_to_thumbnail_file", + return_value=thumbnail_path, + ) def setUp(self, thumb_mock): self.id = uuid.uuid4().hex self.name = "test name" @@ -108,7 +111,9 @@ def test_restore_channel_thumbnail(self): self.assertEqual(self.channel.thumbnail, thumbnail_path) def test_restore_channel_thumbnail_encoding(self): - self.assertEqual(self.channel.thumbnail_encoding["base64"], self.thumbnail_encoding) + self.assertEqual( + self.channel.thumbnail_encoding["base64"], self.thumbnail_encoding + ) def test_restore_channel_version(self): self.assertEqual(self.channel.version, self.version) @@ -117,63 +122,69 @@ def test_restore_channel_version(self): class PerseusRestoreTestCase(StudioTestCase): def setUp(self): super(PerseusRestoreTestCase, self).setUp() - image_path = generate_object_storage_name('test', 'test.png') - default_storage.save(image_path, BytesIO(b'test')) + image_path = generate_object_storage_name("test", "test.png") + default_storage.save(image_path, BytesIO(b"test")) def test_process_content(self): tests = [ + {"content": "test 1", "output": "test 1", "images": {}}, { - "content": 'test 1', - "output": 'test 1', - 'images': {} + "content": "test 2 ![test](${☣ LOCALPATH}/images/test.png)", + "output": "test 2 ![test](${☣ CONTENTSTORAGE}/test.png)", + "images": {}, }, { - "content": 'test 2 ![test](${☣ LOCALPATH}/images/test.png)', - "output": 'test 2 ![test](${☣ CONTENTSTORAGE}/test.png)', - 'images': {} + "content": "test 3 ![](${☣ LOCALPATH}/images/test.png)", + "output": "test 3 ![](${☣ CONTENTSTORAGE}/test.png =50x50)", + "images": { + "${☣ LOCALPATH}/images/test.png": {"width": 50, "height": 50} + }, }, { - "content": 'test 3 ![](${☣ LOCALPATH}/images/test.png)', - "output": 'test 3 ![](${☣ CONTENTSTORAGE}/test.png =50x50)', - 'images': { - '${☣ LOCALPATH}/images/test.png': { - 'width': 50, - 'height': 50 - } - } + "content": "test 4 ![](${☣ LOCALPATH}/images/test.png) ![](${☣ LOCALPATH}/images/test.png)", + "output": "test 4 ![](${☣ CONTENTSTORAGE}/test.png) ![](${☣ CONTENTSTORAGE}/test.png)", + "images": {}, }, { - "content": 'test 4 ![](${☣ LOCALPATH}/images/test.png) ![](${☣ LOCALPATH}/images/test.png)', - "output": 'test 4 ![](${☣ CONTENTSTORAGE}/test.png) ![](${☣ CONTENTSTORAGE}/test.png)', - 'images': {} + "content": "test 5  $\\sqrt{36}+\\frac{1}{2}$ ", + "output": "test 5 $$\\sqrt{36}+\\frac{1}{2}$$", + "images": {}, }, { - "content": 'test 5  $\\sqrt{36}+\\frac{1}{2}$ ', - "output": 'test 5 $$\\sqrt{36}+\\frac{1}{2}$$', - 'images': {} + "content": "test 6 $\\frac{1}{2}$ $\\frac{3}{2}$", + "output": "test 6 $$\\frac{1}{2}$$ $$\\frac{3}{2}$$", + "images": {}, }, - { - "content": 'test 6 $\\frac{1}{2}$ $\\frac{3}{2}$', - "output": 'test 6 $$\\frac{1}{2}$$ $$\\frac{3}{2}$$', - 'images': {} - } ] for test in tests: result = process_content(test, mixer.blend(AssessmentItem)) - self.assertEqual(result, test['output']) + self.assertEqual(result, 
test["output"]) def test_generate_assessment_item(self): # Run in Spanish to ensure we are properly creating JSON with non-localized numbers activate("es-es") for assessment_id, data in list(ASSESSMENT_DATA.items()): - assessment_data = json.loads(render_to_string(data['template'], data).encode('utf-8', "ignore")) - assessment_item = generate_assessment_item(assessment_id, data['order'], data['type'], assessment_data) - self.assertEqual(assessment_item.type, data['type']) - self.assertEqual(assessment_item.question, data.get('question', '')) - self.assertEqual(assessment_item.randomize, bool(data.get('randomize'))) - self.assertEqual(assessment_item.raw_data, data.get('raw_data', '')) + assessment_data = json.loads( + render_to_string(data["template"], data).encode("utf-8", "ignore") + ) + assessment_item = generate_assessment_item( + assessment_id, data["order"], data["type"], assessment_data + ) + self.assertEqual(assessment_item.type, data["type"]) + self.assertEqual(assessment_item.question, data.get("question", "")) + self.assertEqual(assessment_item.randomize, bool(data.get("randomize"))) + self.assertEqual(assessment_item.raw_data, data.get("raw_data", "")) for hint in json.loads(assessment_item.hints): - self.assertTrue(any(h for h in data['hints'] if h['hint'] == hint['hint'])) + self.assertTrue( + any(h for h in data["hints"] if h["hint"] == hint["hint"]) + ) for answer in json.loads(assessment_item.answers): - self.assertTrue(any(a for a in data['answers'] if a['answer'] == str(answer['answer']) and a['correct'] == answer['correct'])) + self.assertTrue( + any( + a + for a in data["answers"] + if a["answer"] == str(answer["answer"]) + and a["correct"] == answer["correct"] + ) + ) deactivate() diff --git a/contentcuration/contentcuration/tests/test_secrettoken_model.py b/contentcuration/contentcuration/tests/test_secrettoken_model.py index c799b1a42e..fa0ecfadf3 100755 --- a/contentcuration/contentcuration/tests/test_secrettoken_model.py +++ b/contentcuration/contentcuration/tests/test_secrettoken_model.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - from django.test import TestCase from le_utils import proquint diff --git a/contentcuration/contentcuration/tests/test_serializers.py b/contentcuration/contentcuration/tests/test_serializers.py index 9ea2e2529c..0b5c2b2661 100644 --- a/contentcuration/contentcuration/tests/test_serializers.py +++ b/contentcuration/contentcuration/tests/test_serializers.py @@ -1,6 +1,7 @@ -from __future__ import absolute_import +import uuid from django.db.models.query import QuerySet +from django.utils import timezone from le_utils.constants import content_kinds from mock import Mock from rest_framework import serializers @@ -9,11 +10,14 @@ from contentcuration.models import Channel from contentcuration.models import ContentNode from contentcuration.models import DEFAULT_CONTENT_DEFAULTS +from contentcuration.models import RecommendationsEvent from contentcuration.tests import testdata from contentcuration.viewsets.channel import ChannelSerializer as BaseChannelSerializer from contentcuration.viewsets.common import ContentDefaultsSerializer from contentcuration.viewsets.contentnode import ContentNodeSerializer from contentcuration.viewsets.feedback import FlagFeedbackEventSerializer +from contentcuration.viewsets.feedback import RecommendationsEventSerializer +from contentcuration.viewsets.feedback import RecommendationsInteractionEventSerializer def ensure_no_querysets_in_serializer(object): @@ -29,7 +33,9 @@ def ensure_no_querysets_in_serializer(object): 
class ContentNodeSerializerTestCase(BaseAPITestCase): def setUp(self): super(ContentNodeSerializerTestCase, self).setUp() - self.data = dict(extra_fields=dict(options=dict(modality="QUIZ")), complete=True) + self.data = dict( + extra_fields=dict(options=dict(modality="QUIZ")), complete=True + ) self.node = ContentNode(kind_id=content_kinds.VIDEO) @property @@ -40,7 +46,13 @@ def test_no_completion_criteria(self): self.assertTrue(self.serializer.is_valid()) def test_completion_criteria__valid(self): - self.data["extra_fields"]["options"].update(completion_criteria={"model": "time", "threshold": 10, "learner_managed": True}) + self.data["extra_fields"]["options"].update( + completion_criteria={ + "model": "time", + "threshold": 10, + "learner_managed": True, + } + ) serializer = self.serializer serializer.is_valid() try: @@ -49,7 +61,9 @@ def test_completion_criteria__valid(self): self.fail("Completion criteria should be valid") def test_completion_criteria__invalid(self): - self.data["extra_fields"]["options"].update(completion_criteria={"model": "time", "threshold": "test"}) + self.data["extra_fields"]["options"].update( + completion_criteria={"model": "time", "threshold": "test"} + ) serializer = self.serializer serializer.is_valid() with self.assertRaises(serializers.ValidationError): @@ -65,17 +79,19 @@ def test_repr_doesnt_evaluate_querysets(self): ContentNode.objects.filter(node_id__in=node_ids), many=True ) - object = ContentNodeSerializer( - ContentNode.objects.get(node_id=node_ids[0]) - ) + object = ContentNodeSerializer(ContentNode.objects.get(node_id=node_ids[0])) # Ensure we don't evaluate querysets when repr is called on a Serializer. See docs for # no_field_eval_repr in contentcuration/serializers.py for more info. obj_string = repr(object) - assert "QuerySet" not in obj_string, "object __repr__ contains queryset: {}".format(obj_string) + assert ( + "QuerySet" not in obj_string + ), "object __repr__ contains queryset: {}".format(obj_string) objs_string = repr(objects) - assert "QuerySet" not in objs_string, "objects __repr__ contains queryset: {}".format(objs_string) + assert ( + "QuerySet" not in objs_string + ), "objects __repr__ contains queryset: {}".format(objs_string) class ContentDefaultsSerializerTestCase(BaseAPITestCase): @@ -117,13 +133,25 @@ def test_update(self): self.assertEqual(defaults, s.save()) def test_update__merge(self): - defaults = dict(author="Buster", aggregator="Aggregators R US", provider="USA",) + defaults = dict( + author="Buster", + aggregator="Aggregators R US", + provider="USA", + ) s = ContentDefaultsSerializer( - defaults, data=dict(author="Duster", provider="Canada",) + defaults, + data=dict( + author="Duster", + provider="Canada", + ), ) self.assertTrue(s.is_valid()) self.assertEqual( - dict(author="Duster", aggregator="Aggregators R US", provider="Canada",), + dict( + author="Duster", + aggregator="Aggregators R US", + provider="Canada", + ), s.save(), ) @@ -199,31 +227,218 @@ def setUp(self): def _create_base_feedback_data(self, context, contentnode_id, content_id): base_feedback_data = { - 'context': context, - 'contentnode_id': contentnode_id, - 'content_id': content_id, + "context": context, + "contentnode_id": contentnode_id, + "content_id": content_id, } return base_feedback_data def test_deserialization_and_validation(self): data = { - 'user': self.user.id, - 'target_channel_id': str(self.channel.id), - 'context': {'test_key': 'test_value'}, - 'contentnode_id': str(self.flagged_node.id), - 'content_id': 
str(self.flagged_node.content_id), - 'feedback_type': 'FLAGGED', - 'feedback_reason': 'Reason1.....' + "user": self.user.id, + "target_channel_id": str(self.channel.id), + "context": {"test_key": "test_value"}, + "contentnode_id": str(self.flagged_node.id), + "content_id": str(self.flagged_node.content_id), + "feedback_type": "FLAGGED", + "feedback_reason": "Reason1.....", } serializer = FlagFeedbackEventSerializer(data=data) self.assertTrue(serializer.is_valid(), serializer.errors) instance = serializer.save() - self.assertEqual(instance.context, data['context']) - self.assertEqual(instance.user.id, data['user']) - self.assertEqual(instance.feedback_type, data['feedback_type']) - self.assertEqual(instance.feedback_reason, data['feedback_reason']) + self.assertEqual(instance.context, data["context"]) + self.assertEqual(instance.user.id, data["user"]) + self.assertEqual(instance.feedback_type, data["feedback_type"]) + self.assertEqual(instance.feedback_reason, data["feedback_reason"]) def test_invalid_data(self): - data = {'context': 'invalid'} + data = {"context": "invalid"} serializer = FlagFeedbackEventSerializer(data=data) self.assertFalse(serializer.is_valid()) + + +class RecommendationsInteractionEventSerializerTestCase(BaseAPITestCase): + def setUp(self): + super(RecommendationsInteractionEventSerializerTestCase, self).setUp() + self.channel = testdata.channel("testchannel") + self.interaction_node = testdata.node( + { + "kind_id": content_kinds.VIDEO, + "title": "Recommended Video content", + }, + ) + self.node_where_import_is_initiated = testdata.node( + { + "kind_id": content_kinds.TOPIC, + "title": "Node where content is imported", + }, + ) + self.recommendation_event = RecommendationsEvent.objects.create( + user=self.user, + target_channel_id=self.channel.id, + content_id=self.node_where_import_is_initiated.content_id, + contentnode_id=self.node_where_import_is_initiated.id, + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, + time_hidden=timezone.now(), + content=[ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], + ) + + def test_deserialization_and_validation(self): + data = { + "context": {"test_key": "test_value"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "IGNORED", + "feedback_reason": "----", + "recommendation_event_id": str(self.recommendation_event.id), + } + serializer = RecommendationsInteractionEventSerializer(data=data) + self.assertTrue(serializer.is_valid(), serializer.errors) + instance = serializer.save() + self.assertEqual(instance.context, data["context"]) + self.assertEqual(instance.feedback_type, data["feedback_type"]) + self.assertEqual( + str(instance.recommendation_event_id), data["recommendation_event_id"] + ) + + def test_bulk_deserialization_and_validation(self): + bulk_data = [ + { + "context": {"test_key": "test_value_1"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "IGNORED", + "feedback_reason": "----", + "recommendation_event_id": str(self.recommendation_event.id), + }, + { + "context": {"test_key": "test_value_2"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "PREVIEWED", + "feedback_reason": "++++", + "recommendation_event_id": str(self.recommendation_event.id), + }, + ] + serializer = 
RecommendationsInteractionEventSerializer( + data=bulk_data, many=True + ) + self.assertTrue(serializer.is_valid(), serializer.errors) + instances = serializer.save() + self.assertEqual(len(instances), 2) + self.assertEqual(instances[0].context, bulk_data[0]["context"]) + self.assertEqual(instances[1].feedback_type, bulk_data[1]["feedback_type"]) + + def test_invalid_data(self): + data = {"context": "invalid"} + serializer = RecommendationsInteractionEventSerializer(data=data) + self.assertFalse(serializer.is_valid()) + + data = { + "context": {"test_key": "test_value"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "INVALID_TYPE", + "feedback_reason": "-----", + "recommendation_event_id": "invalid-uuid", + } + serializer = RecommendationsInteractionEventSerializer(data=data) + self.assertFalse(serializer.is_valid()) + + def test_invalid_bulk_data(self): + # Missing 'feedback_type' + bulk_data = [ + { + "context": {"test_key": "test_value_1"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "IGNORED", + "feedback_reason": "----", + "recommendation_event_id": str(self.recommendation_event.id), + }, + { + "context": {"test_key": "test_value_2"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_reason": "----", + "recommendation_event_id": str(self.recommendation_event.id), + }, + ] + serializer = RecommendationsInteractionEventSerializer( + data=bulk_data, many=True + ) + self.assertFalse(serializer.is_valid()) + self.assertIn("feedback_type", str(serializer.errors)) + + +class RecommendationsEventSerializerTestCase(BaseAPITestCase): + def setUp(self): + super(RecommendationsEventSerializerTestCase, self).setUp() + self.channel = testdata.channel("testchannel") + self.node_where_import_is_initiated = testdata.node( + { + "kind_id": content_kinds.TOPIC, + "title": "Title of the topic", + }, + ) + + def test_deserialization_and_validation(self): + data = { + "user": self.user.id, + "target_channel_id": str(self.channel.id), + "context": {"model_version": 1, "breadcrumbs": "#Title#->Random"}, + "contentnode_id": str(self.node_where_import_is_initiated.id), + "content_id": str(self.node_where_import_is_initiated.content_id), + "time_hidden": timezone.now().isoformat(), + "content": [ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], + } + serializer = RecommendationsEventSerializer(data=data) + self.assertTrue(serializer.is_valid(), serializer.errors) + instance = serializer.save() + self.assertEqual(instance.context, data["context"]) + self.assertEqual(instance.user.id, data["user"]) + self.assertEqual( + str(instance.contentnode_id).replace("-", ""), + data["contentnode_id"].replace("-", ""), + ) + self.assertEqual(instance.content, data["content"]) + + def test_invalid_data(self): + # Test with missing required fields + data = {"context": "invalid"} + serializer = RecommendationsEventSerializer(data=data) + self.assertFalse(serializer.is_valid()) + + # Test with invalid contentnode_id + data = { + "user": self.user.id, + "target_channel_id": str(self.channel.id), + "context": {"model_version": 1, "breadcrumbs": "#Title#->Random"}, + "contentnode_id": "invalid-uuid", + "content_id": str(self.node_where_import_is_initiated.content_id), + "time_hidden": timezone.now().isoformat(), + "content": [ + 
{ + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], + } + serializer = RecommendationsEventSerializer(data=data) + self.assertFalse(serializer.is_valid()) diff --git a/contentcuration/contentcuration/tests/test_setlanguage.py b/contentcuration/contentcuration/tests/test_setlanguage.py index 941db98f5d..9654f351c7 100644 --- a/contentcuration/contentcuration/tests/test_setlanguage.py +++ b/contentcuration/contentcuration/tests/test_setlanguage.py @@ -36,7 +36,11 @@ def test_setlang(self): The set_language view can be used to change the session language. """ lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -51,7 +55,11 @@ def test_setlang_next_valid(self): """ lang_code = self._get_inactive_language_code() next_url = reverse("channels") - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -66,7 +74,11 @@ def test_setlang_next_invalid(self): """ lang_code = self._get_inactive_language_code() next_url = "/not/a/real/url" - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -79,7 +91,11 @@ def test_setlang_null(self): Test language code set to null which shoul direct to default language "en" """ lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -87,7 +103,11 @@ def test_setlang_null(self): ) self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) lang_code = None - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -101,7 +121,11 @@ def test_setlang_null_next_valid(self): The user is redirected to the "next" argument. 
""" lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -110,7 +134,11 @@ def test_setlang_null_next_valid(self): self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) next_url = reverse("channels") lang_code = None - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -124,7 +152,11 @@ def test_setlang_null_next_invalid(self): The user is redirected to user redirect if the "next" argument is invalid. """ lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -133,7 +165,11 @@ def test_setlang_null_next_invalid(self): self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) next_url = "/not/a/real/url" lang_code = None - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -146,5 +182,9 @@ def test_setlang_get(self): The set_language view is forbidden to be accessed via GET """ lang_code = self._get_inactive_language_code() - response = self.client.get(reverse("set_language"), params=self.set_post_data(lang_code), content_type='application/json') + response = self.client.get( + reverse("set_language"), + params=self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(type(response), HttpResponseNotAllowed) diff --git a/contentcuration/contentcuration/tests/test_settings.py b/contentcuration/contentcuration/tests/test_settings.py index 48b1b39db6..30f4931db7 100644 --- a/contentcuration/contentcuration/tests/test_settings.py +++ b/contentcuration/contentcuration/tests/test_settings.py @@ -11,7 +11,12 @@ class SettingsTestCase(BaseAPITestCase): def test_username_change(self): - data = json.dumps({"first_name": "New firstname", "last_name": "New lastname", }) + data = json.dumps( + { + "first_name": "New firstname", + "last_name": "New lastname", + } + ) request = self.create_post_request( reverse_lazy("update_user_full_name"), data=data, diff --git a/contentcuration/contentcuration/tests/test_storage_common.py b/contentcuration/contentcuration/tests/test_storage_common.py index 29ad9f59c9..f89534c194 100644 --- a/contentcuration/contentcuration/tests/test_storage_common.py +++ b/contentcuration/contentcuration/tests/test_storage_common.py @@ -16,6 +16,7 @@ from contentcuration.utils.storage_common import determine_content_type from 
contentcuration.utils.storage_common import get_presigned_upload_url from contentcuration.utils.storage_common import UnknownStorageBackendError + # The modules we'll test @@ -77,7 +78,11 @@ def test_raises_error(self): """ with pytest.raises(UnknownStorageBackendError): get_presigned_upload_url( - "nice", "err", 5, 0, storage=self.STORAGE, + "nice", + "err", + 5, + 0, + storage=self.STORAGE, ) @@ -187,12 +192,16 @@ def test_can_upload_file_to_presigned_url(self): # S3 expects a base64-encoded MD5 checksum md5 = hashlib.md5(file_contents) md5_checksum = md5.hexdigest() - md5_checksum_base64 = codecs.encode(codecs.decode(md5_checksum, "hex"), "base64").decode() + md5_checksum_base64 = codecs.encode( + codecs.decode(md5_checksum, "hex"), "base64" + ).decode() filename = "blahfile.jpg" filepath = generate_object_storage_name(md5_checksum, filename) - ret = get_presigned_upload_url(filepath, md5_checksum_base64, 1000, len(file_contents)) + ret = get_presigned_upload_url( + filepath, md5_checksum_base64, 1000, len(file_contents) + ) url = ret["uploadURL"] content_type = ret["mimetype"] @@ -201,6 +210,6 @@ def test_can_upload_file_to_presigned_url(self): data=file, headers={ "Content-Type": content_type, - } + }, ) resp.raise_for_status() diff --git a/contentcuration/contentcuration/tests/test_sushibar_endpoints.py b/contentcuration/contentcuration/tests/test_sushibar_endpoints.py index 6c88fbc29d..861cea196d 100644 --- a/contentcuration/contentcuration/tests/test_sushibar_endpoints.py +++ b/contentcuration/contentcuration/tests/test_sushibar_endpoints.py @@ -1,11 +1,6 @@ -from __future__ import absolute_import -from __future__ import print_function - import functools import json import os -from builtins import str -from builtins import zip from django.urls import reverse_lazy diff --git a/contentcuration/contentcuration/tests/test_sync.py b/contentcuration/contentcuration/tests/test_sync.py index 923d8ad541..8d011cc1db 100644 --- a/contentcuration/contentcuration/tests/test_sync.py +++ b/contentcuration/contentcuration/tests/test_sync.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import uuid from django.urls import reverse @@ -38,7 +36,9 @@ class SyncTestCase(StudioTestCase): def setUp(self): super(SyncTestCase, self).setUpBase() - self.derivative_channel = Channel.objects.create(name="testchannel", actor_id=self.admin_user.id) + self.derivative_channel = Channel.objects.create( + name="testchannel", actor_id=self.admin_user.id + ) self.channel.main_tree.copy_to(self.derivative_channel.main_tree) self.derivative_channel.main_tree.refresh_from_db() self.derivative_channel.save() @@ -50,8 +50,8 @@ def setUp(self): # Put all nodes into a clean state so we can track when syncing # causes changes in the tree. - mark_all_nodes_as_published(self.channel) - mark_all_nodes_as_published(self.derivative_channel) + mark_all_nodes_as_published(self.channel.main_tree) + mark_all_nodes_as_published(self.derivative_channel.main_tree) def _add_temp_file_to_content_node(self, node): new_file = create_temp_file("mybytes") @@ -130,10 +130,11 @@ def test_sync_files_remove(self): """ Tests whether sync_files remove additional files from the copied node or not. 
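A note on the presigned-upload test in `test_storage_common.py` above: it derives an S3-style Content-MD5 value by round-tripping the hex digest through `codecs`. For readers, here is a stdlib-only sketch showing that construction alongside an equivalent (and arguably clearer) one-liner over the binary digest:

```python
import base64
import codecs
import hashlib

data = b"some file contents"
hex_md5 = hashlib.md5(data).hexdigest()

# The test's approach: hex string -> raw bytes -> base64 (the "base64"
# codec appends a trailing newline, stripped here for comparison).
via_codecs = codecs.encode(codecs.decode(hex_md5, "hex"), "base64").decode().strip()

# Equivalent construction straight from the binary digest.
via_digest = base64.b64encode(hashlib.md5(data).digest()).decode()

assert via_codecs == via_digest
```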
""" - video_node = (self.channel.main_tree.get_descendants() - .filter(kind_id=content_kinds.VIDEO) - .first() - ) + video_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.VIDEO) + .first() + ) video_node_copy = self.derivative_channel.main_tree.get_descendants().get( source_node_id=video_node.node_id ) @@ -149,7 +150,9 @@ def test_sync_files_remove(self): self.assertEqual(video_node.files.count(), video_node_copy.files.count()) for file in File.objects.filter(contentnode=video_node.id): - self.assertTrue(video_node_copy.files.filter(checksum=file.checksum).exists()) + self.assertTrue( + video_node_copy.files.filter(checksum=file.checksum).exists() + ) def test_sync_assessment_item_add(self): """ @@ -222,29 +225,21 @@ def test_sync_tags_add(self): ) self.assertIsNotNone(target_child) - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) tag = ContentTag.objects.create(tag_name="tagname") contentnode.tags.add(tag) - self.assertNotEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertNotEqual(target_child.tags.count(), contentnode.tags.count()) sync_channel(self.derivative_channel, sync_resource_details=True) self.derivative_channel.main_tree.refresh_from_db() - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) self.assertEqual( - target_child.tags.filter( - tag_name=tag.tag_name - ).count(), + target_child.tags.filter(tag_name=tag.tag_name).count(), 1, ) @@ -269,9 +264,7 @@ def test_sync_tags_add_multiple_tags(self): ) self.assertIsNotNone(target_child) - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) # Create the same tag twice ContentTag.objects.create(tag_name="tagname") @@ -280,23 +273,19 @@ def test_sync_tags_add_multiple_tags(self): contentnode.tags.add(tag) - self.assertNotEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertNotEqual(target_child.tags.count(), contentnode.tags.count()) try: sync_channel(self.derivative_channel, sync_resource_details=True) except Exception as e: - self.fail("Could not run sync_channel without raising exception: {}".format(e)) + self.fail( + "Could not run sync_channel without raising exception: {}".format(e) + ) self.derivative_channel.main_tree.refresh_from_db() - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) self.assertEqual( - target_child.tags.filter( - tag_name=tag.tag_name - ).count(), + target_child.tags.filter(tag_name=tag.tag_name).count(), 1, ) @@ -361,7 +350,9 @@ def test_sync_license_description(self): .first() ) - special_permissions_license = License.objects.get(license_name="Special Permissions") + special_permissions_license = License.objects.get( + license_name="Special Permissions" + ) contentnode.license = special_permissions_license contentnode.license_description = "You cannot use this content on a Thursday" @@ -381,8 +372,13 @@ def test_sync_license_description(self): ) self.assertEqual(target_child.license, special_permissions_license) - self.assertEqual(target_child.license_description, "You cannot use this content on a Thursday") - self.assertEqual(target_child.copyright_holder, "Thursday's child has far to go") + self.assertEqual( + 
target_child.license_description, + "You cannot use this content on a Thursday", + ) + self.assertEqual( + target_child.copyright_holder, "Thursday's child has far to go" + ) def test_sync_channel_other_metadata_labels(self): """ @@ -445,7 +441,8 @@ def setUp(self): def _get_assessmentitem_metadata(self, assessment_id=None, contentnode_id=None): return { "assessment_id": assessment_id or uuid.uuid4().hex, - "contentnode_id": contentnode_id or self.channel.main_tree.get_descendants() + "contentnode_id": contentnode_id + or self.channel.main_tree.get_descendants() .filter(kind_id=content_kinds.EXERCISE) .first() .id, @@ -458,6 +455,7 @@ def _get_file_metadata(self): "name": "le_studio_file", "file_format": file_formats.MP3, "preset": format_presets.AUDIO, + "duration": 17, } def _upload_file_to_contentnode(self, file_metadata=None, contentnode_id=None): @@ -468,16 +466,25 @@ def _upload_file_to_contentnode(self, file_metadata=None, contentnode_id=None): to point to the contentnode. """ file = file_metadata or self._get_file_metadata() - self.client.post(reverse("file-upload-url"), file, format="json",) + self.client.post( + reverse("file-upload-url"), + file, + format="json", + ) file_from_db = File.objects.get(checksum=file["checksum"]) self.sync_changes( - [generate_update_event( - file_from_db.id, - FILE, - { - "contentnode": contentnode_id or self.channel.main_tree.get_descendants().first().id - }, - channel_id=self.channel.id)],) + [ + generate_update_event( + file_from_db.id, + FILE, + { + "contentnode": contentnode_id + or self.channel.main_tree.get_descendants().first().id + }, + channel_id=self.channel.id, + ) + ], + ) file_from_db.refresh_from_db() return file_from_db @@ -495,19 +502,29 @@ def _create_assessmentitem(self, assessmentitem, channel_id): def test_content_id__becomes_equal_on_channel_sync_assessment_item(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Create a new assessmentitem. self._create_assessmentitem( - assessmentitem=self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id), - channel_id=self.channel.id + assessmentitem=self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ), + channel_id=self.channel.id, ) # Assert after creating a new assessmentitem on copied node, it's content_id is changed. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) # Syncs channel. self.channel.main_tree.refresh_from_db() @@ -520,7 +537,9 @@ def test_content_id__becomes_equal_on_channel_sync_assessment_item(self): # Now after syncing the original and copied node should have same content_id. 
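The `content_id` assertions in this block, like the recommendations-event serializer test earlier in this diff, compare UUID-valued fields that may surface either as dashed strings or as 32-character hex. A small sketch of why stripping dashes (or normalizing through `uuid.UUID`) makes those comparisons safe:

```python
import uuid

u = uuid.uuid4()
dashed = str(u)   # e.g. "1b9d6bcd-bbfd-..."
compact = u.hex   # same value, no dashes

# Both spellings denote the same UUID; normalize before comparing.
assert dashed.replace("-", "") == compact
assert uuid.UUID(dashed) == uuid.UUID(compact)
```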
assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__becomes_equal_on_channel_sync_file(self): file = self._upload_file_to_contentnode() @@ -532,7 +551,9 @@ def test_content_id__becomes_equal_on_channel_sync_file(self): # Assert after new file upload, content_id changes. file.contentnode.refresh_from_db() file_contentnode_copy.refresh_from_db() - self.assertNotEqual(file.contentnode.content_id, file_contentnode_copy.content_id) + self.assertNotEqual( + file.contentnode.content_id, file_contentnode_copy.content_id + ) # Syncs channel. self.channel.main_tree.refresh_from_db() diff --git a/contentcuration/contentcuration/tests/test_urlendpoints.py b/contentcuration/contentcuration/tests/test_urlendpoints.py index 46750925e0..3771aebee4 100644 --- a/contentcuration/contentcuration/tests/test_urlendpoints.py +++ b/contentcuration/contentcuration/tests/test_urlendpoints.py @@ -1,7 +1,4 @@ -from __future__ import absolute_import - import importlib -from builtins import str from django.conf import settings from django.urls import reverse diff --git a/contentcuration/contentcuration/tests/test_user.py b/contentcuration/contentcuration/tests/test_user.py index 9fda1ceefe..0be1b140bb 100644 --- a/contentcuration/contentcuration/tests/test_user.py +++ b/contentcuration/contentcuration/tests/test_user.py @@ -7,7 +7,6 @@ import json import sys import tempfile -from builtins import range from django.core.management import call_command from django.test import TransactionTestCase diff --git a/contentcuration/contentcuration/tests/test_utils.py b/contentcuration/contentcuration/tests/test_utils.py index 6012d56431..f4924e92ec 100644 --- a/contentcuration/contentcuration/tests/test_utils.py +++ b/contentcuration/contentcuration/tests/test_utils.py @@ -1,8 +1,3 @@ -from __future__ import absolute_import - -from future import standard_library -standard_library.install_aliases() -from builtins import str from io import BytesIO from django.conf import settings @@ -46,7 +41,9 @@ def setUp(self): # Upload some pieces of content, as our test data self.existing_content = "dowereallyexist.jpg" - self.existing_content_path = generate_object_storage_name("dowereallyexist", self.existing_content) + self.existing_content_path = generate_object_storage_name( + "dowereallyexist", self.existing_content + ) storage.save(self.existing_content_path, BytesIO(b"maybe")) def test_returns_empty_if_content_already_exists(self): @@ -66,10 +63,7 @@ def test_returns_file_not_uploaded_yet(self): Test if a list with a nonexistent file passed in to get_file_diff would return that file. 
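For orientation on the `get_file_diff` tests above, a hypothetical mental model may help; this is a sketch of the observable behavior the assertions rely on, not Studio's actual implementation:

```python
# Hypothetical mental model only -- NOT Studio's get_file_diff: report
# the requested filenames that have no stored object yet.
def file_diff_sketch(requested, existing):
    return [name for name in requested if name not in existing]

assert file_diff_sketch(
    ["dowereallyexist.jpg", "rando"], {"dowereallyexist.jpg"}
) == ["rando"]
```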
""" - files = [ - self.existing_content, - "rando" - ] + files = [self.existing_content, "rando"] assert get_file_diff(files) == ["rando"] @@ -80,8 +74,7 @@ class FileFormatsTestCase(StudioTestCase): def test_unsupported_files_raise_error(self): unsupported_file = File.objects.create( - file_on_disk=ContentFile(b"test"), - checksum='aaa' + file_on_disk=ContentFile(b"test"), checksum="aaa" ) with self.assertRaises(Exception): @@ -96,14 +89,18 @@ def test_guess_format_from_extension(self): for ext in known_extensions: file_with_ext = File.objects.create( - file_on_disk=ContentFile(b"test"), - checksum="aaa" + file_on_disk=ContentFile(b"test"), checksum="aaa" ) try: - file_with_ext.file_on_disk.save("aaa.{}".format(ext), ContentFile("aaa")) + file_with_ext.file_on_disk.save( + "aaa.{}".format(ext), ContentFile("aaa") + ) except Exception as e: - raise type(e)(e.message + " ... (hint: make sure that the version of le-utils you're using has its file formats synced).") + raise type(e)( + e.message + + " ... (hint: make sure that the version of le-utils you're using has its file formats synced)." + ) class LEUtilsListsTestCase(TestCase): @@ -112,38 +109,39 @@ class LEUtilsListsTestCase(TestCase): """ def test_le_utils_has_all_consstants_lists(self): - assert licenses.LICENSELIST, 'licenses.LICENSELIST missing from LE-UTILS!' - assert content_kinds.KINDLIST, 'content_kinds.KINDLIST missing from LE-UTILS!' - assert languages.LANGUAGELIST, 'languages.LANGUAGELIST missing from LE-UTILS!' - assert file_formats.FORMATLIST, 'file_formats.FORMATLIST missing from LE-UTILS!' - assert format_presets.PRESETLIST, 'format_presets.PRESETLIST missing from LE-UTILS!' + assert licenses.LICENSELIST, "licenses.LICENSELIST missing from LE-UTILS!" + assert content_kinds.KINDLIST, "content_kinds.KINDLIST missing from LE-UTILS!" + assert languages.LANGUAGELIST, "languages.LANGUAGELIST missing from LE-UTILS!" + assert file_formats.FORMATLIST, "file_formats.FORMATLIST missing from LE-UTILS!" + assert ( + format_presets.PRESETLIST + ), "format_presets.PRESETLIST missing from LE-UTILS!" def test_le_utils_has_all_choices(self): """Used for django model choices fields to provide validation.""" - assert content_kinds.choices, 'content_kinds.choices missing from LE-UTILS!' - assert format_presets.choices, 'format_presets.choices missing from LE-UTILS!' - assert file_formats.choices, 'file_formats.choices missing from LE-UTILS!' + assert content_kinds.choices, "content_kinds.choices missing from LE-UTILS!" + assert format_presets.choices, "format_presets.choices missing from LE-UTILS!" + assert file_formats.choices, "file_formats.choices missing from LE-UTILS!" class LoadConstantsManagementCommandTestCase(TestCase): """ Check `loadconstants` works. 
""" - models = [ - ContentKind, - FileFormat, - FormatPreset, - Language, - License - ] + + models = [ContentKind, FileFormat, FormatPreset, Language, License] def test_starting_from_empty_db(self): for model in self.models: qset = model.objects.all() - assert len(list(qset)) == 0, 'Constants of type {} already exist.'.format(str(model)) + assert len(list(qset)) == 0, "Constants of type {} already exist.".format( + str(model) + ) def test_models_exist_after_loadconstants(self): call_command("loadconstants") for model in self.models: qset = model.objects.all() - assert len(list(qset)) > 3, 'Only {} constants of type {} created.'.format(len(list(qset)), str(model)) + assert len(list(qset)) > 3, "Only {} constants of type {} created.".format( + len(list(qset)), str(model) + ) diff --git a/contentcuration/contentcuration/tests/test_zipcontentview.py b/contentcuration/contentcuration/tests/test_zipcontentview.py index 270a131001..d872a3e5fc 100644 --- a/contentcuration/contentcuration/tests/test_zipcontentview.py +++ b/contentcuration/contentcuration/tests/test_zipcontentview.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import os import tempfile import zipfile @@ -8,10 +6,9 @@ class ZipFileTestCase(StudioTestCase): - def setUp(self): super(ZipFileTestCase, self).setUpBase() - self.zipfile_url = '/zipcontent/' + self.zipfile_url = "/zipcontent/" self.temp_files = [] @@ -20,18 +17,21 @@ def tearDown(self): os.remove(temp_file) def do_create_zip(self): - zip_handle, zip_filename = tempfile.mkstemp(suffix='.zip') + zip_handle, zip_filename = tempfile.mkstemp(suffix=".zip") self.temp_files.append(zip_filename) os.close(zip_handle) - with zipfile.ZipFile(zip_filename, 'w') as zip: - zip.writestr("index.html", "
<html><body>Hello World!</body></html>") + with zipfile.ZipFile(zip_filename, "w") as zip: + zip.writestr( + "index.html", + "<html><body>Hello World!</body></html>
", + ) return zip_filename def test_invalid_zip(self): temp_file, response = self.upload_temp_file(b"Hello!", ext="zip") - url = '{}{}/'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 500 @@ -39,9 +39,11 @@ def test_valid_zipfile(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 200 @@ -49,9 +51,11 @@ def test_valid_zipfile_file_access(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/index.html'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/index.html".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 200 @@ -59,9 +63,11 @@ def test_valid_zipfile_missing_file(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/iamjustanillusion.txt'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/iamjustanillusion.txt".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 404 @@ -69,8 +75,10 @@ def test_valid_zipfile_access_outside_zip_fails(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/../outsidejson.js'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/../outsidejson.js".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 404 diff --git a/contentcuration/contentcuration/tests/testdata.py b/contentcuration/contentcuration/tests/testdata.py index bbae770ef1..4a0db4fbe8 100644 --- a/contentcuration/contentcuration/tests/testdata.py +++ b/contentcuration/contentcuration/tests/testdata.py @@ -1,7 +1,4 @@ # -*- coding: utf-8 -*- -from future import standard_library -standard_library.install_aliases() - import hashlib import json import logging @@ -14,7 +11,9 @@ import pytest from django.core.files.storage import default_storage +from le_utils.constants import exercises from le_utils.constants import format_presets +from PIL import Image from contentcuration import models as cc from contentcuration.tests.utils import mixer @@ -22,63 +21,67 @@ pytestmark = pytest.mark.django_db -thumbnail_bytes = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01\r\n-\xb4\x00\x00\x00\x00IEND\xaeB`\x82' # noqa E501 +thumbnail_bytes = 
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01\r\n-\xb4\x00\x00\x00\x00IEND\xaeB`\x82" # noqa E501 def video(): """ Create a video content kind entry. """ - return mixer.blend(cc.ContentKind, kind='video') + return mixer.blend(cc.ContentKind, kind="video") def preset_video(): """ Create a video format preset. """ - return mixer.blend(cc.FormatPreset, id='high_res_video', kind=video()) + return mixer.blend(cc.FormatPreset, id="high_res_video", kind=video()) def topic(): """ Create a topic content kind. """ - return mixer.blend(cc.ContentKind, kind='topic') + return mixer.blend(cc.ContentKind, kind="topic") def exercise(): """ Create a exercise content kind. """ - return mixer.blend(cc.ContentKind, kind='exercise') + return mixer.blend(cc.ContentKind, kind="exercise") def slideshow(): """ Returns a slideshow content kind object. """ - return mixer.blend(cc.ContentKind, kind='slideshow') + return mixer.blend(cc.ContentKind, kind="slideshow") def fileformat_perseus(): """ Create a perseus FileFormat entry. """ - return mixer.blend(cc.FileFormat, extension='perseus', mimetype='application/exercise') + return mixer.blend( + cc.FileFormat, extension="perseus", mimetype="application/exercise" + ) def fileformat_mp4(): """ Create an mp4 FileFormat entry. """ - return mixer.blend(cc.FileFormat, extension='mp4', mimetype='application/video') + return mixer.blend(cc.FileFormat, extension="mp4", mimetype="application/video") def license_wtfpl(): """ Create a license object called WTF License. """ - return cc.License.objects.first() or mixer.blend(cc.License, license_name="WTF License") + return cc.License.objects.first() or mixer.blend( + cc.License, license_name="WTF License" + ) def fileobj_video(contents=None): @@ -92,10 +95,12 @@ def fileobj_video(contents=None): logging.warning("input = {}".format(contents)) filecontents = contents else: - filecontents = "".join(random.sample(string.printable, 20)).encode('utf-8') + filecontents = "".join(random.sample(string.printable, 20)).encode("utf-8") logging.warning("contents = {}".format(filecontents)) - temp_file_dict = create_studio_file(filecontents, preset=format_presets.VIDEO_HIGH_RES, ext='mp4') - return temp_file_dict['db_file'] + temp_file_dict = create_studio_file( + filecontents, preset=format_presets.VIDEO_HIGH_RES, ext="mp4" + ) + return temp_file_dict["db_file"] def node_json(data): @@ -105,11 +110,11 @@ def node_json(data): "content_id": "aa480b60a7f4526f886e7df9f4e9b8cc", "description": "Recipes for various dishes.", "author": "Bradley Smoker", - "kind": data['kind'], - "license": data['license'], + "kind": data["kind"], + "license": data["license"], "extra_fields": {}, "files": [], - "questions": [] + "questions": [], } return node_data @@ -118,36 +123,36 @@ def node_json(data): def node(data, parent=None): # noqa: C901 new_node = None # Create topics - if 'node_id' not in data: - data['node_id'] = uuid.uuid4() - if data['kind_id'] == "topic": + if "node_id" not in data: + data["node_id"] = uuid.uuid4() + if data["kind_id"] == "topic": new_node = cc.ContentNode( kind=topic(), parent=parent, - title=data['title'], - node_id=data['node_id'], - content_id=data.get('content_id') or data['node_id'], - sort_order=data.get('sort_order', 1), + title=data["title"], + node_id=data["node_id"], + content_id=data.get("content_id") or data["node_id"], + sort_order=data.get("sort_order", 1), complete=True, ) new_node.save() - if 'children' in 
data: - for child in data['children']: + if "children" in data: + for child in data["children"]: node(child, parent=new_node) # Create videos - elif data['kind_id'] == "video": + elif data["kind_id"] == "video": new_node = cc.ContentNode( kind=video(), parent=parent, - title=data['title'], - node_id=data['node_id'], + title=data["title"], + node_id=data["node_id"], license=license_wtfpl(), - content_id=data.get('content_id') or data['node_id'], - sort_order=data.get('sort_order', 1), + content_id=data.get("content_id") or data["node_id"], + sort_order=data.get("sort_order", 1), complete=True, - extra_fields=data.get('extra_fields'), + extra_fields=data.get("extra_fields"), ) new_node.save() video_file = fileobj_video(contents=b"Video File") @@ -157,43 +162,43 @@ def node(data, parent=None): # noqa: C901 video_file.save() # Create exercises - elif data['kind_id'] == "exercise": + elif data["kind_id"] == "exercise": if "extra_fields" in data: extra_fields = data["extra_fields"] else: extra_fields = { - 'mastery_model': data['mastery_model'], - 'randomize': True, - 'm': data.get('m') or 0, - 'n': data.get('n') or 0 + "mastery_model": data.get("mastery_model", "m_of_n"), + "randomize": True, + "m": data.get("m") or 0, + "n": data.get("n") or 0, } new_node = cc.ContentNode( kind=exercise(), parent=parent, - title=data['title'], - node_id=data['node_id'], + title=data["title"], + node_id=data["node_id"], license=license_wtfpl(), extra_fields=extra_fields, - content_id=data.get('content_id') or data['node_id'], - sort_order=data.get('sort_order', 1), + content_id=data.get("content_id") or data["node_id"], + sort_order=data.get("sort_order", 1), complete=True, ) new_node.save() - for assessment_item in data.get('assessment_items', []): + for assessment_item in data.get("assessment_items", []): ai = cc.AssessmentItem( contentnode=new_node, - assessment_id=assessment_item['assessment_id'], - question=assessment_item['question'], - type=assessment_item['type'], - answers=json.dumps(assessment_item['answers']), - hints=json.dumps(assessment_item.get('hints') or []) + assessment_id=assessment_item["assessment_id"], + question=assessment_item["question"], + type=assessment_item["type"], + answers=json.dumps(assessment_item["answers"]), + hints=json.dumps(assessment_item.get("hints") or []), ) ai.save() - if data.get('tags'): - for tag in data['tags']: - t = cc.ContentTag(tag_name=tag['tag_name']) + if data.get("tags"): + for tag in data["tags"]: + t = cc.ContentTag(tag_name=tag["tag_name"]) t.save() new_node.tags.add(t) new_node.save() @@ -212,7 +217,9 @@ def tree(parent=None): def channel(name="testchannel"): channel_creator = user() - channel = cc.Channel.objects.create(name=name, actor_id=channel_creator.id) + channel = cc.Channel.objects.create( + name=name, actor_id=channel_creator.id, language_id="en" + ) channel.save() channel.main_tree = tree() @@ -227,13 +234,15 @@ def random_string(chars=10): :param chars: Number of characters in string :return: A string with [chars] random characters. 
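Stepping back from the `node()` builder refactored above: it consumes nested dicts describing a tree. A hypothetical input, illustrative only, whose keys mirror those the builder reads (`kind_id`, `title`, `children`, `sort_order`, `mastery_model`, `m`, `n`):

```python
sample_tree = {
    "kind_id": "topic",
    "title": "Root topic",
    "children": [
        {"kind_id": "video", "title": "Intro video", "sort_order": 1},
        {
            "kind_id": "exercise",
            "title": "Counting practice",
            "mastery_model": "m_of_n",
            "m": 3,
            "n": 5,
            "sort_order": 2,
        },
    ],
}
```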
""" - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(chars)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(chars) + ) -def user(email='user@test.com', feature_flags=None): +def user(email="user@test.com", feature_flags=None): user, is_new = cc.User.objects.get_or_create(email=email) if is_new: - user.set_password('password') + user.set_password("password") user.is_active = True user.save() if feature_flags is not None: @@ -242,14 +251,19 @@ def user(email='user@test.com', feature_flags=None): return user -def create_temp_file(filebytes, preset='document', ext='pdf', original_filename=None): +def create_temp_file(filebytes, preset="document", ext="pdf", original_filename=None): """Old name for create_studio_file.""" import warnings - warnings.warn('Deprecated function; use create_studio_file instead.', DeprecationWarning) - return create_studio_file(filebytes, preset='document', ext='pdf', original_filename=None) + + warnings.warn( + "Deprecated function; use create_studio_file instead.", DeprecationWarning + ) + return create_studio_file( + filebytes, preset="document", ext="pdf", original_filename=None + ) -def create_studio_file(filebytes, preset='document', ext='pdf', original_filename=None): +def create_studio_file(filebytes, preset="document", ext="pdf", original_filename=None): """ Create a file with contents of `filebytes` and the associated cc.File object for it. :param filebytes: The data to be stored in the file (as bytes) @@ -263,7 +277,7 @@ def create_studio_file(filebytes, preset='document', ext='pdf', original_filenam - db_file (cc.File): a Studio File object saved in DB """ try: - filebytes = filebytes.encode('utf-8') + filebytes = filebytes.encode("utf-8") except: # noqa pass @@ -286,25 +300,27 @@ def create_studio_file(filebytes, preset='document', ext='pdf', original_filenam preset = cc.FormatPreset.objects.get(id=preset) file_format = cc.FileFormat.objects.get(extension=ext) if original_filename is None: - original_filename = 'somefile.' + ext + original_filename = "somefile." + ext # 3. Create a File object - db_file_obj = mixer.blend(cc.File, - checksum=checksum, - file_format=file_format, - preset=preset, - original_filename=original_filename, - file_on_disk=storage_file_path) + db_file_obj = mixer.blend( + cc.File, + checksum=checksum, + file_format=file_format, + preset=preset, + original_filename=original_filename, + file_on_disk=storage_file_path, + ) return { - 'name': os.path.basename(storage_file_path), - 'data': filebytes, - 'file': fileobj, - 'db_file': db_file_obj + "name": os.path.basename(storage_file_path), + "data": filebytes, + "file": fileobj, + "db_file": db_file_obj, } -def create_test_file(filebytes, ext='pdf'): +def create_test_file(filebytes, ext="pdf"): """ Create a temporary file with contents of `filebytes` for use in tests. 
:param filebytes: The data to be stored in the file (as bytes) @@ -324,11 +340,11 @@ def create_test_file(filebytes, ext='pdf'): fileobj.write(filebytes) fileobj.seek(0) return { - 'checksum': checksum, - 'name': os.path.basename(storage_file_path), - 'storagepath': storage_file_path, - 'data': filebytes, - 'file': fileobj + "checksum": checksum, + "name": os.path.basename(storage_file_path), + "storagepath": storage_file_path, + "data": filebytes, + "file": fileobj, } @@ -344,90 +360,100 @@ def create_test_file(filebytes, ext='pdf'): "description": "Practice counting up to 10 objects.", "author": "Khan Academy", "extra_fields": {}, - "exercise_data": { - "m": 5, - "n": 7, - "mastery_model": "m_of_n" - }, + "exercise_data": {"m": 5, "n": 7, "mastery_model": "m_of_n"}, "license": "CC-BY", "files": [], "questions": [ { - 'type': 'single_selection', - 'question': 'What is your quest?', - 'hints': ['Holy', 'Coconuts'], - 'answers': [ - 'To seek the grail', - 'To eat some hail', - 'To spectacularly fail', - 'To post bail' + "type": "single_selection", + "question": "What is your quest?", + "hints": ["Holy", "Coconuts"], + "answers": [ + "To seek the grail", + "To eat some hail", + "To spectacularly fail", + "To post bail", ], - 'files': [ + "files": [ { - 'filename': 'nonexistant.mp4', - 'size': 0, + "filename": "nonexistant.mp4", + "size": 0, } ], - 'source_url': '', - 'raw_data': '', - 'assessment_id': '1' + "source_url": "", + "raw_data": "", + "assessment_id": "1", } - ] - + ], } ] -def fileobj_exercise_image(): +def fileobj_exercise_image(size=(100, 100), color="red"): """ Create a generic exercise image file in storage and return a File model pointing to it. """ - filecontents = "".join(random.sample(string.printable, 20)) - temp_file_dict = create_studio_file(filecontents, preset=format_presets.EXERCISE_IMAGE, ext='jpg') - return temp_file_dict['db_file'] + image = Image.new("RGB", size, color=color) + buffer = BytesIO() + image.save(buffer, "JPEG") + temp_file_dict = create_studio_file( + buffer.getvalue(), preset=format_presets.EXERCISE_IMAGE, ext="jpg" + ) + return temp_file_dict["db_file"] -def fileobj_exercise_graphie(): +def fileobj_exercise_graphie(original_filename=None): """ Create an graphi exercise image file in storage and return a File model pointing to it. 
""" - filecontents = "".join(random.sample(string.printable, 20)) - temp_file_dict = create_studio_file(filecontents, preset=format_presets.EXERCISE_GRAPHIE, ext='graphie', original_filename='theoriginalfilename') - return temp_file_dict['db_file'] + svg_content = f"{original_filename or ''.join(random.sample(string.printable, 20))}" + json_content = '{"version": {"major": 0, "minor": 0}}' + filecontents = svg_content + exercises.GRAPHIE_DELIMITER + json_content + temp_file_dict = create_studio_file( + filecontents, + preset=format_presets.EXERCISE_GRAPHIE, + ext="graphie", + original_filename=original_filename or "theoriginalfilename", + ) + return temp_file_dict["db_file"] def base64encoding(): - return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" \ - "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" \ - "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" \ - "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" \ - "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" \ - "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" \ - "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" \ - "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" \ - "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" \ - "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" \ - "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" \ - "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" \ - "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" \ + return ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" + "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" + "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" + "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" + "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" + "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" + "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" + "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" + "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" + "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" + "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" + "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" + "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" "/aXwDY2vpQfdHLrIAAAAASUVORK5CYII=" + ) def generated_base64encoding(): - return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA"\ - "C8klEQVR4nKWTSWhVZxiGv/N//3+Ge+49d8gdkphYOyxMGm+p1QQSm40KIgqKoKUuKu0idFMIWRWKC7"\ - "G4sqEDxZUEQciwMsaEihsV0ThAojYmahK8NjXJzXCH3DP955zfRUkWIljwW368z7t6H+nA953wPkf/b"\ - "/DY/q0MACIAUO4bnuTrfwIAwH0X9UTM+OSL7dKb4KFPU9Kh9g8ahBDtAKC8WqO+Ho8ZrucgAIAkhJC6"\ - "zl047vju54js1MzD8eI6vHtfS0va0I44+bmX3DMvXL45V/wom435vndSQfnB04djF6WfzvXt9aXgBxb"\ - "RB6iqNpZWV36ZvD+62PH1gSqf0SEvpGY5wp6Lf/TebtjRkonEE53ctie8cuUoCtJNiAMdOgsPVyU3fUm"\ - "Z/CTOcNf21tbs7D/zjYvLhUaUCP04lc5kdzZ/FmfYSpk8lUpuatNZeJg40EE0IddIHJaE6WC9oj1Kx5Lf"\ - "ZKJxHhipr1aAGWElJEdQOVifTnupWPJEvaKNB6YjS1zkNaHUEtlDP6ongNhQ8ktmFboiT/9dnTYkLZWK"\ - 
"1wLSEHBHqm6qrp1BVyz7RTNObChF9YSQPSII9SQURdOkXNSU14ICA9RIItlCLNtEywaVIKgEvelcvpUB"\ - "yuVKUKZcVIuCZVGPEEpc8QgLvAkU/7aqhL9Np5PdC6X8i9LL3ChW7OMFRmmFkDFC6eNUNPOrbS19xx3n"\ - "Fhb5NvCDMaIw9TcU0i6yYBZDhnGl7LHZ/it9eevVUq81lx99MZWbnsnN9/SPDCys+Ww2FDGGyEJlDQVpU5"\ - "j6OxnMjUwIHvzMLTv0bOT61Z6B7mUAACVeh9FYnbpl81btw6ZmDQCgZ6B76flfN65yy9EE908P5kYmKQDA0"\ - "OK1Ozu9htH7dEqsjyik6O0RVW/KIFM8yzoMABMAAPdg0m1exD/v4t9iY8oAAPfokw34v4JwjcxkQYIAYq5b9"\ + return ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA" + "C8klEQVR4nKWTSWhVZxiGv/N//3+Ge+49d8gdkphYOyxMGm+p1QQSm40KIgqKoKUuKu0idFMIWRWKC7" + "G4sqEDxZUEQciwMsaEihsV0ThAojYmahK8NjXJzXCH3DP955zfRUkWIljwW368z7t6H+nA953wPkf/b" + "/DY/q0MACIAUO4bnuTrfwIAwH0X9UTM+OSL7dKb4KFPU9Kh9g8ahBDtAKC8WqO+Ho8ZrucgAIAkhJC6" + "zl047vju54js1MzD8eI6vHtfS0va0I44+bmX3DMvXL45V/wom435vndSQfnB04djF6WfzvXt9aXgBxb" + "RB6iqNpZWV36ZvD+62PH1gSqf0SEvpGY5wp6Lf/TebtjRkonEE53ctie8cuUoCtJNiAMdOgsPVyU3fUm" + "Z/CTOcNf21tbs7D/zjYvLhUaUCP04lc5kdzZ/FmfYSpk8lUpuatNZeJg40EE0IddIHJaE6WC9oj1Kx5Lf" + "ZKJxHhipr1aAGWElJEdQOVifTnupWPJEvaKNB6YjS1zkNaHUEtlDP6ongNhQ8ktmFboiT/9dnTYkLZWK" + "1wLSEHBHqm6qrp1BVyz7RTNObChF9YSQPSII9SQURdOkXNSU14ICA9RIItlCLNtEywaVIKgEvelcvpUB" + "yuVKUKZcVIuCZVGPEEpc8QgLvAkU/7aqhL9Np5PdC6X8i9LL3ChW7OMFRmmFkDFC6eNUNPOrbS19xx3n" + "Fhb5NvCDMaIw9TcU0i6yYBZDhnGl7LHZ/it9eevVUq81lx99MZWbnsnN9/SPDCys+Ww2FDGGyEJlDQVpU5" + "j6OxnMjUwIHvzMLTv0bOT61Z6B7mUAACVeh9FYnbpl81btw6ZmDQCgZ6B76flfN65yy9EE908P5kYmKQDA0" + "OK1Ozu9htH7dEqsjyik6O0RVW/KIFM8yzoMABMAAPdg0m1exD/v4t9iY8oAAPfokw34v4JwjcxkQYIAYq5b9" "+OJrg1v1uF3yITnGcV5zxcxRYhLZ3rOem9LSe+r82vB1kP1vFwEDQAAAABJRU5ErkJggg==" + ) def srt_subtitle(): diff --git a/contentcuration/contentcuration/tests/utils/__init__.py b/contentcuration/contentcuration/tests/utils/__init__.py index c9d7c65893..b89c103587 100644 --- a/contentcuration/contentcuration/tests/utils/__init__.py +++ b/contentcuration/contentcuration/tests/utils/__init__.py @@ -1,8 +1,10 @@ #!/usr/bin/env python import sys -from .migration_test_case import * # noqa + import pytest from mixer.backend.django import mixer + +from .migration_test_case import * # noqa from contentcuration.models import ContentNode # Mark the test class or function as a slow test, where we avoid running it @@ -10,10 +12,7 @@ # Use py.test --includeslowtests to run these kinds of tests. slowtest = pytest.mark.skipif( "--includeslowtests" not in sys.argv, - reason="Skipping because this test is a slow test." 
+ reason="Skipping because this test is a slow test.", ) -mixer.register( - ContentNode, - extra_fields=lambda: {'a': 1, 'b': 2, 'c': {'d': 3}} -) +mixer.register(ContentNode, extra_fields=lambda: {"a": 1, "b": 2, "c": {"d": 3}}) diff --git a/contentcuration/contentcuration/tests/utils/celery/test_tasks.py b/contentcuration/contentcuration/tests/utils/celery/test_tasks.py index 0b203c41a0..dbe6928cb6 100644 --- a/contentcuration/contentcuration/tests/utils/celery/test_tasks.py +++ b/contentcuration/contentcuration/tests/utils/celery/test_tasks.py @@ -16,7 +16,9 @@ def test_set_total(self): self.assertEqual(200, self.tracker.total) def test_increment(self): - with mock.patch("contentcuration.utils.celery.tasks.ProgressTracker.track") as track: + with mock.patch( + "contentcuration.utils.celery.tasks.ProgressTracker.track" + ) as track: self.tracker.increment() track.assert_called_with(1.0) self.tracker.progress = 1 diff --git a/contentcuration/contentcuration/tests/utils/migration_test_case.py b/contentcuration/contentcuration/tests/utils/migration_test_case.py index c330fdada7..92fa984916 100644 --- a/contentcuration/contentcuration/tests/utils/migration_test_case.py +++ b/contentcuration/contentcuration/tests/utils/migration_test_case.py @@ -1,5 +1,5 @@ -from django.db import connection from django.core import management +from django.db import connection from django.db.migrations.executor import MigrationExecutor from django.test import TransactionTestCase @@ -12,8 +12,11 @@ class MigrationTestCase(TransactionTestCase): app = None def setUp(self): - assert self.migrate_from and self.migrate_to, \ - "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__) + assert ( + self.migrate_from and self.migrate_to + ), "TestCase '{}' must define migrate_from and migrate_to properties".format( + type(self).__name__ + ) migrate_from = [(self.app, self.migrate_from)] migrate_to = [(self.app, self.migrate_to)] diff --git a/contentcuration/contentcuration/tests/utils/perseus_question_new_bar_graphs.json b/contentcuration/contentcuration/tests/utils/perseus_question_new_bar_graphs.json new file mode 100644 index 0000000000..7ebde4559e --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/perseus_question_new_bar_graphs.json @@ -0,0 +1,114 @@ +{ + "answerArea": { + "calculator": false, + "chi2Table": false, + "periodicTable": false, + "tTable": false, + "zTable": false + }, + "hints": [ + { + "content": "The bottom bar lines up to $\\purpleD{6}$. \n\n![](web+graphie://cdn.kastatic.org/ka-perseus-graphie/d855aefe9a722f9a794b0883ebcdb8c37b4ba0c7)\n\nWhich type of fruit has $\\purpleD{6}$ in Luigi's home?", + "images": { + "web+graphie://cdn.kastatic.org/ka-perseus-graphie/d855aefe9a722f9a794b0883ebcdb8c37b4ba0c7": { + "height": 330, + "width": 404 + } + }, + "replace": false, + "widgets": {} + }, + { + "content": "Kind of fruit | Number\n:- | :-: \nOranges | $\\purpleD{6}$ \n\nLuigi has $\\purpleD{6}$ oranges. 
So, the bottom bar should be labeled $\\purpleD{\\text{Oranges}}$.", + "images": {}, + "replace": false, + "widgets": {} + }, + { + "content": "Now let's label the other bars to match the table.", + "images": {}, + "replace": false, + "widgets": {} + }, + { + "content": "Here is the completed graph:\n\n![](web+graphie://cdn.kastatic.org/ka-perseus-graphie/95262ebaf42bdd1929e5d6d1e2853d3eb0a5cc74)", + "images": { + "web+graphie://cdn.kastatic.org/ka-perseus-graphie/95262ebaf42bdd1929e5d6d1e2853d3eb0a5cc74": { + "height": 330, + "width": 404 + } + }, + "replace": false, + "widgets": {} + } + ], + "itemDataVersion": { + "major": 0, + "minor": 1 + }, + "question": { + "content": "Luigi created a chart and a bar graph to show how many of each type of fruit were in his home.\n\nKind of fruit | Number \n:- | :-: \nApple | $7$ \nStrawberries | $3$ \nOranges | $6$ \nBananas| $2$ \n\n**Label each bar on the bar graph.**\n\n[[☃ label-image 1]]\n", + "images": {}, + "widgets": { + "label-image 1": { + "alignment": "default", + "graded": true, + "options": { + "choices": [ + "Apple", + "Strawberries", + "Oranges", + "Bananas" + ], + "hideChoicesFromInstructions": true, + "imageAlt": "", + "imageHeight": 330, + "imageUrl": "web+graphie://cdn.kastatic.org/ka-perseus-graphie/ab207c6f38c887130b68c078e6158a87aab60c45", + "imageWidth": 404, + "markers": [ + { + "answers": [ + "Strawberries" + ], + "label": "", + "x": 24.1, + "y": 17.7 + }, + { + "answers": [ + "Bananas" + ], + "label": "", + "x": 24.4, + "y": 35.7 + }, + { + "answers": [ + "Apple" + ], + "label": "", + "x": 23.8, + "y": 52.9 + }, + { + "answers": [ + "Oranges" + ], + "label": "", + "x": 24.1, + "y": 70.9 + } + ], + "multipleAnswers": false, + "static": false + }, + "static": false, + "type": "label-image", + "version": { + "major": 0, + "minor": 0 + } + } + } + } +} diff --git a/contentcuration/contentcuration/tests/utils/qti/__init__.py b/contentcuration/contentcuration/tests/utils/qti/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/contentcuration/contentcuration/tests/utils/qti/test_assessment_items.py b/contentcuration/contentcuration/tests/utils/qti/test_assessment_items.py new file mode 100644 index 0000000000..6bf2f71e51 --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/qti/test_assessment_items.py @@ -0,0 +1,504 @@ +import unittest + +from contentcuration.utils.assessment.qti.assessment_item import AssessmentItem +from contentcuration.utils.assessment.qti.assessment_item import CorrectResponse +from contentcuration.utils.assessment.qti.assessment_item import DefaultValue +from contentcuration.utils.assessment.qti.assessment_item import ItemBody +from contentcuration.utils.assessment.qti.assessment_item import MapEntry +from contentcuration.utils.assessment.qti.assessment_item import Mapping +from contentcuration.utils.assessment.qti.assessment_item import OutcomeDeclaration +from contentcuration.utils.assessment.qti.assessment_item import ResponseDeclaration +from contentcuration.utils.assessment.qti.assessment_item import ResponseProcessing +from contentcuration.utils.assessment.qti.assessment_item import Value +from contentcuration.utils.assessment.qti.constants import BaseType +from contentcuration.utils.assessment.qti.constants import Cardinality +from contentcuration.utils.assessment.qti.html import Blockquote +from contentcuration.utils.assessment.qti.html import Br +from contentcuration.utils.assessment.qti.html import Div +from contentcuration.utils.assessment.qti.html import P +from 
contentcuration.utils.assessment.qti.html import Strong
+from contentcuration.utils.assessment.qti.interaction_types.simple import (
+    ChoiceInteraction,
+)
+from contentcuration.utils.assessment.qti.interaction_types.simple import SimpleChoice
+from contentcuration.utils.assessment.qti.interaction_types.text_based import (
+    ExtendedTextInteraction,
+)
+from contentcuration.utils.assessment.qti.interaction_types.text_based import (
+    TextEntryInteraction,
+)
+from contentcuration.utils.assessment.qti.prompt import Prompt
+
+
+class QTIAssessmentItemTests(unittest.TestCase):
+    def test_true_false_question(self):
+        expected_xml = """
+<qti-assessment-item identifier="beginnersguide007" title="BG true false example " xml:lang="EN-US" time-dependent="false">
+<qti-response-declaration identifier="RESPONSE" cardinality="single" base-type="identifier">
+<qti-correct-response>
+<qti-value>true</qti-value>
+</qti-correct-response>
+</qti-response-declaration>
+<qti-outcome-declaration identifier="SCORE" cardinality="single" base-type="float">
+<qti-default-value>
+<qti-value>1</qti-value>
+</qti-default-value>
+</qti-outcome-declaration>
+<qti-item-body>
+<p>This is a True/False question?</p>
+<qti-choice-interaction response-identifier="RESPONSE" max-choices="1">
+<qti-simple-choice identifier="true">True</qti-simple-choice>
+<qti-simple-choice identifier="false">False</qti-simple-choice>
+</qti-choice-interaction>
+</qti-item-body>
+<qti-response-processing template="https://purl.imsglobal.org/spec/qti/v3p0/rptemplates/match_correct"/>
+</qti-assessment-item>
+""".replace( + "\n", "" + ) + + # Construct the QTI elements + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.IDENTIFIER, + correct_response=CorrectResponse(value=[Value(value="true")]), + ) + + outcome_declaration = OutcomeDeclaration( + identifier="SCORE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + default_value=DefaultValue(value=[Value(value="1")]), + ) + + true_choice = SimpleChoice(identifier="true", children=["True"]) + false_choice = SimpleChoice(identifier="false", children=["False"]) + choice_interaction = ChoiceInteraction( + response_identifier="RESPONSE", + max_choices=1, + answers=[true_choice, false_choice], + ) + + item_body = ItemBody( + children=[ + P(children=["This is a True/False question?"]), + choice_interaction, + ] + ) + response_processing = ResponseProcessing( + template="https://purl.imsglobal.org/spec/qti/v3p0/rptemplates/match_correct" + ) + + assessment_item = AssessmentItem( + identifier="beginnersguide007", + title="BG true false example ", + language="EN-US", + time_dependent=False, + item_body=item_body, + response_declaration=[response_declaration], + outcome_declaration=[outcome_declaration], + response_processing=response_processing, + ) + + # Generate the XML + generated_xml = assessment_item.to_xml_string() + + # Compare the generated XML with the expected XML + self.assertEqual(generated_xml.strip(), expected_xml.strip()) + + def test_multiple_choice_question(self): + expected_xml = """ + + +A +C +D + + + + +1 + + + +

QTI 3 is a new version released in 2022.

+ + +

Which of the following features are new to QTI 3?

+

Pick 3 choices.

+
+Shared Vocabulary +Pineapple Flavored +Catalogs for candidate-specific content. +Conformance features definitions +A subset of HTML5 elements +
+
+ +
""".replace( + "\n", "" + ) + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.MULTIPLE, + base_type=BaseType.IDENTIFIER, + correct_response=CorrectResponse( + value=[ + Value(value="A"), + Value(value="C"), + Value(value="D"), + ] + ), + ) + + outcome_declaration = OutcomeDeclaration( + identifier="SCORE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + default_value=DefaultValue(value=[Value(value="1")]), + ) + + prompt = Prompt( + children=[ + P( + children=[ + "Which of the following features are ", + Strong(children=["new"]), + " to QTI 3?", + ] + ), + P(children=["Pick 3 choices."]), + ] + ) + choice_a = SimpleChoice(identifier="A", children=["Shared Vocabulary"]) + choice_b = SimpleChoice(identifier="B", children=["Pineapple Flavored"]) + choice_c = SimpleChoice( + identifier="C", + children=["Catalogs for candidate-specific content."], + ) + choice_d = SimpleChoice( + identifier="D", children=["Conformance features definitions"] + ) + choice_e = SimpleChoice(identifier="E", children=["A subset of HTML5 elements"]) + choice_interaction = ChoiceInteraction( + response_identifier="RESPONSE", + max_choices=3, + answers=[choice_a, choice_b, choice_c, choice_d, choice_e], + prompt=prompt, + ) + + item_body = ItemBody( + children=[ + P(children=["QTI 3 is a new version released in 2022."]), + choice_interaction, + ] + ) + response_processing = ResponseProcessing( + template="https://purl.imsglobal.org/spec/qti/v3p0/rptemplates/match_correct" + ) + + assessment_item = AssessmentItem( + identifier="beginnersguide008", + title="BG Choice example", + language="EN-US", + time_dependent=False, + item_body=item_body, + response_declaration=[response_declaration], + outcome_declaration=[outcome_declaration], + response_processing=response_processing, + ) + + generated_xml = assessment_item.to_xml_string() + self.assertEqual(generated_xml.strip(), expected_xml.strip()) + + def test_long_text_question(self): + expected_xml = """ + + + +

Read this postcard from your English pen-friend, Sam.

+
+
+

Here is a postcard of my town. Please send me
+a postcard from your town. What size is your Town?
+What is the nicest part of your town?
+Where do you go in the evenings?

+

Sam

+
+
+ +Write Sam a postcard. Answer the questions. Write 23–30 words + +
+
""".replace( + "\n", "" + ) + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.STRING, + ) + + outcome_declaration = OutcomeDeclaration( + identifier="SCORE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + ) + + prompt_text = "Write Sam a postcard. Answer the questions. Write 23–30 words" + + extended_text_interaction = ExtendedTextInteraction( + response_identifier="RESPONSE", + prompt=Prompt(children=[prompt_text]), + ) + + item_body = ItemBody( + children=[ + P(children=["Read this postcard from your English pen-friend, Sam."]), + Div( + children=[ + Blockquote( + class_="postcard", + children=[ + P( + children=[ + "Here is a postcard of my town. Please send me", + Br(), + "a postcard from your town. What size is your Town?", + Br(), + "What is the nicest part of your town?", + Br(), + "Where do you go in the evenings?", + ] + ), + P(children=["Sam"]), + ], + ) + ] + ), + extended_text_interaction, + ] + ) + + assessment_item = AssessmentItem( + identifier="beginnersguide009", + title="BG Postcard example", + language="en-US", + time_dependent=False, + item_body=item_body, + response_declaration=[response_declaration], + outcome_declaration=[outcome_declaration], + ) + + generated_xml = assessment_item.to_xml_string() + self.assertEqual(generated_xml.strip(), expected_xml.strip()) + + def test_missing_word_question(self): + expected_xml = """ + + +York + + + + + + + + +

Identify the missing word in this famous quote from Shakespeare's Richard III.

+
+
+

Now is the winter of our discontent
+Made glorious summer by this sun of ; +
+And all the clouds that lour'd upon our house
+In the deep bosom of the ocean buried.

+
+
+
+ +
""".replace( + "\n", "" + ) + + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.STRING, + correct_response=CorrectResponse(value=[Value(value="York")]), + mapping=Mapping( + default_value=0, + map_entries=[ + MapEntry(map_key="York", mapped_value=1, case_sensitive=True), + MapEntry(map_key="york", mapped_value=0.5), + ], + ), + ) + + outcome_declaration = OutcomeDeclaration( + identifier="SCORE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + ) + + text_entry_interaction = TextEntryInteraction(response_identifier="RESPONSE") + + item_body = ItemBody( + children=[ + P( + children=[ + "Identify the missing word in this famous quote from Shakespeare's Richard III." + ] + ), + Div( + children=[ + Blockquote( + class_="postcard", + children=[ + P( + children=[ + "Now is the winter of our discontent", + Br(), + "Made glorious summer by this sun of ", + text_entry_interaction, + ";", + Br(), + "And all the clouds that lour'd upon our house", + Br(), + "In the deep bosom of the ocean buried.", + ] + ), + ], + ) + ] + ), + ] + ) + + response_processing = ResponseProcessing( + template="https://purl.imsglobal.org/spec/qti/v3p0/rptemplates/map_response" + ) + + assessment_item = AssessmentItem( + identifier="beginnersguide010", + title="BG Missing Word example", + language="en-US", + time_dependent=False, + item_body=item_body, + response_declaration=[response_declaration], + outcome_declaration=[outcome_declaration], + response_processing=response_processing, + ) + + generated_xml = assessment_item.to_xml_string() + self.assertEqual(generated_xml.strip(), expected_xml.strip()) + + def test_numerical_entry_question(self): + expected_xml = """ + + +42.5 + + + + +0.0 + + + +

Calculate the value of x when 2x + 5 = 90:

+

+
+
""".replace( + "\n", "" + ) + + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + correct_response=CorrectResponse(value=[Value(value="42.5")]), + ) + + outcome_declaration = OutcomeDeclaration( + identifier="SCORE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + default_value=DefaultValue(value=[Value(value="0.0")]), + ) + + text_entry_interaction = TextEntryInteraction( + response_identifier="RESPONSE", + expected_length=10, + pattern_mask="^[0-9]*\\.?[0-9]+$", + placeholder_text="Enter a number", + ) + + assessment_item = AssessmentItem( + identifier="numerical-entry-item", + title="Numerical Entry Question", + language="en-US", + time_dependent=False, + item_body=ItemBody( + children=[ + P(children=["Calculate the value of x when 2x + 5 = 90:"]), + P(children=[text_entry_interaction]), + ] + ), + response_declaration=[response_declaration], + outcome_declaration=[outcome_declaration], + ) + + generated_xml = assessment_item.to_xml_string() + self.assertEqual(generated_xml.strip(), expected_xml.strip()) diff --git a/contentcuration/contentcuration/tests/utils/qti/test_fields.py b/contentcuration/contentcuration/tests/utils/qti/test_fields.py new file mode 100644 index 0000000000..40e4a9c0e5 --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/qti/test_fields.py @@ -0,0 +1,332 @@ +import unittest + +from contentcuration.utils.assessment.qti.fields import validate_data_uri +from contentcuration.utils.assessment.qti.fields import validate_local_href_path +from contentcuration.utils.assessment.qti.fields import validate_local_src_path +from contentcuration.utils.assessment.qti.fields import validate_local_srcset + + +class TestValidateDataUri(unittest.TestCase): + def test_valid_data_uris(self): + valid_uris = [ + "data:text/plain;base64,SGVsbG8=", + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==", + "data:text/plain,Hello%20World", + "data:,Hello", + "data:text/html,

<h1>Hello</h1>
", + 'data:application/json,{"key":"value"}', + "data:text/css,body{color:red}", + "data:image/svg+xml,", + "data:text/plain;charset=utf-8,Hello", + "data:text/plain;charset=utf-8;base64,SGVsbG8=", + ] + + for uri in valid_uris: + with self.subTest(uri=uri): + result = validate_data_uri(uri) + self.assertEqual(result, uri, f"Should return the same URI: {uri}") + + def test_invalid_data_uris(self): + """Test invalid data URI formats""" + invalid_uris = [ + "not-a-data-uri", + "data:", + "data", + "http://example.com", + "https://example.com/image.png", + "ftp://example.com/file.txt", + "file:///path/to/file", + "", + "data:text/plain", + "ata:text/plain,Hello", + ] + + for uri in invalid_uris: + with self.subTest(uri=uri): + with self.assertRaises(ValueError) as cm: + validate_data_uri(uri) + self.assertIn("Invalid data URI format", str(cm.exception)) + + +class TestValidateLocalHrefPath(unittest.TestCase): + def test_valid_relative_paths(self): + """Test valid relative paths""" + valid_paths = [ + "relative/path.jpg", + "../path.jpg", + "./file.png", + "file.txt", + "images/photo.jpg", + "docs/readme.md", + "assets/style.css", + "#fragment", + "?query=value", + "#fragment?query=value", + "path/to/file.html#section", + "subdir/../file.txt", + ] + + for path in valid_paths: + with self.subTest(path=path): + result = validate_local_href_path(path) + self.assertEqual(result, path, f"Should return the same path: {path}") + + def test_valid_data_uris_in_href(self): + data_uris = [ + "data:text/plain,Hello", + "data:image/png;base64,iVBORw0KGgo=", + ] + + for uri in data_uris: + with self.subTest(uri=uri): + result = validate_local_href_path(uri) + self.assertEqual(result, uri) + + def test_invalid_absolute_urls(self): + absolute_urls = [ + "http://example.com", + "https://example.com/path", + "ftp://example.com/file", + "mailto:test@example.com", + "tel:+1234567890", + "//example.com/path", + "/absolute/path", + "/", + ] + + for url in absolute_urls: + with self.subTest(url=url): + with self.assertRaises(ValueError) as cm: + validate_local_href_path(url) + self.assertIn("Absolute URLs not allowed", str(cm.exception)) + + def test_invalid_data_uris_in_href(self): + """Test that invalid data URIs are rejected""" + with self.assertRaises(ValueError) as cm: + validate_local_href_path("data:invalid") + self.assertIn("Invalid data URI format", str(cm.exception)) + + +class TestValidateLocalSrcPath(unittest.TestCase): + def test_valid_src_paths(self): + """Test valid src paths (must have actual file paths)""" + valid_paths = [ + "relative/path.jpg", + "../path.jpg", + "./file.png", + "file.txt", + "images/photo.jpg", + "subdir/../file.txt", + ] + + for path in valid_paths: + with self.subTest(path=path): + result = validate_local_src_path(path) + self.assertEqual(result, path) + + def test_valid_data_uris_in_src(self): + data_uris = [ + "data:text/plain,Hello", + "data:image/png;base64,iVBORw0KGgo=", + ] + + for uri in data_uris: + with self.subTest(uri=uri): + result = validate_local_src_path(uri) + self.assertEqual(result, uri) + + def test_invalid_empty_paths(self): + """Test rejection of empty paths and fragment-only""" + invalid_paths = ["#fragment", "?query=value", "#fragment?query=value"] + + for path in invalid_paths: + with self.subTest(path=path): + with self.assertRaises(ValueError) as cm: + validate_local_src_path(path) + self.assertIn("Invalid local src path", str(cm.exception)) + + def test_absolute_urls_rejected(self): + """Test that absolute URLs are still rejected""" + with 
self.assertRaises(ValueError) as cm: + validate_local_src_path("http://example.com/image.jpg") + self.assertIn("Absolute URLs not allowed", str(cm.exception)) + + +class TestValidateLocalSrcset(unittest.TestCase): + def test_empty_srcset(self): + empty_values = ["", " ", "\t", "\n"] + + for value in empty_values: + with self.subTest(value=repr(value)): + result = validate_local_srcset(value) + self.assertEqual(result, value) + + def test_single_image_srcset(self): + valid_srcsets = [ + "image.jpg 2x", + "image.jpg 1.5x", + "image.jpg 100w", + "image.jpg 50h", + "image.jpg 0.5x", + "path/to/image.png 2x", + "../images/photo.jpg 1x", + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg== 2x", + ] + + for srcset in valid_srcsets: + with self.subTest(srcset=srcset): + result = validate_local_srcset(srcset) + self.assertEqual(result, srcset) + + def test_data_uri_in_srcset(self): + valid_data_srcsets = [ + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg== 1x", + "data:text/plain,Hello%20World 2x", + "data:image/svg+xml, 1.5x", + 'data:application/json,{"key":"value"} 100w', + ] + + for srcset in valid_data_srcsets: + with self.subTest(srcset=srcset): + result = validate_local_srcset(srcset) + self.assertEqual(result, srcset) + + def test_multiple_images_srcset(self): + valid_srcsets = [ + "small.jpg 1x, large.jpg 2x", + "img-320.jpg 320w, img-640.jpg 640w, img-1280.jpg 1280w", + "portrait.jpg 480h, landscape.jpg 960h", + "image1.jpg 1x, image2.jpg 1.5x, image3.jpg 2x", + "a.jpg 1x,b.jpg 2x", # minimal spacing + ] + + for srcset in valid_srcsets: + with self.subTest(srcset=srcset): + result = validate_local_srcset(srcset) + self.assertEqual(result, srcset) + + def test_mixed_data_uri_and_regular_paths(self): + valid_mixed_srcsets = [ + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg== 1x, large.jpg 2x", + "small.jpg 1x, data:image/svg+xml, 2x", + "icon.png 1x, data:text/plain,fallback 2x, large.png 3x", + ] + + for srcset in valid_mixed_srcsets: + with self.subTest(srcset=srcset): + result = validate_local_srcset(srcset) + self.assertEqual(result, srcset) + + def test_multiple_data_uris_in_srcset(self): + valid_multi_data_srcsets = [ + "data:image/png;base64,ABC123 1x, data:image/png;base64,DEF456 2x", + "data:text/plain,Small,Image 1x, data:text/plain,Large,Image 2x", + "data:image/svg+xml, 1x, data:image/svg+xml, 2x, data:image/svg+xml, 3x", # noqa: E501 + 'data:application/json,{"size":"small"} 100w, data:application/json,{"size":"large"} 200w', + ] + + for srcset in valid_multi_data_srcsets: + with self.subTest(srcset=srcset): + result = validate_local_srcset(srcset) + self.assertEqual(result, srcset) + + def test_complex_mixed_srcsets(self): + complex_srcsets = [ + "thumb.jpg 1x, data:image/png;base64,MID123 1.5x, data:image/svg+xml, 2x, large.jpg 3x", + "data:text/plain,Icon,1 50w, regular-100.jpg 100w, data:text/plain,Icon,2 150w, regular-200.jpg 200w", + ] + + for srcset in complex_srcsets: + with self.subTest(srcset=srcset): + result = validate_local_srcset(srcset) + self.assertEqual(result, srcset) + + def test_invalid_descriptors(self): + """Test rejection of invalid descriptors""" + invalid_srcsets = [ + "image.jpg 2", # missing unit + "image.jpg x", # missing number + "image.jpg 2z", # invalid unit + "image.jpg 2.x", # malformed number + "image.jpg .x", # malformed number + 
"image.jpg 2xx", # double unit + "image.jpg -2x", # negative number + "image.jpg 2 x", # space in descriptor + ] + + for srcset in invalid_srcsets: + with self.subTest(srcset=srcset): + with self.assertRaises(ValueError): + validate_local_srcset(srcset) + + def test_invalid_urls_in_srcset(self): + invalid_srcsets = [ + "http://example.com/image.jpg 2x", + "https://cdn.example.com/img.png 1x, local.jpg 2x", + "/absolute/path.jpg 1x", + ] + + for srcset in invalid_srcsets: + with self.subTest(srcset=srcset): + with self.assertRaises(ValueError): + validate_local_srcset(srcset) + + def test_empty_srcset_entries(self): + invalid_srcsets = [ + "image.jpg 2x, ,other.jpg 1x", + ", image.jpg 2x", + "image.jpg 2x,", + ] + + for srcset in invalid_srcsets: + with self.subTest(srcset=srcset): + with self.assertRaises(ValueError): + validate_local_srcset(srcset) + + def test_missing_path_in_srcset(self): + invalid_srcsets = [ + "#fragment 2x", + "?query=value 1x", + ] + + for srcset in invalid_srcsets: + with self.subTest(srcset=srcset): + with self.assertRaises(ValueError): + validate_local_srcset(srcset) + + +class TestEdgeCases(unittest.TestCase): + def test_unicode_paths_href(self): + unicode_paths = ["café/ñ.jpg", "文件/图片.png", "файл.txt"] + + for path in unicode_paths: + with self.subTest(path=path): + result = validate_local_href_path(path) + self.assertEqual(result, path) + + def test_unicode_paths_src(self): + unicode_paths = ["café/ñ.jpg", "文件/图片.png", "файл.txt"] + + for path in unicode_paths: + with self.subTest(path=path): + result = validate_local_src_path(path) + self.assertEqual(result, path) + + def test_very_long_paths(self): + long_path = "a/" * 1000 + "file.txt" + + # Should handle long paths gracefully + result = validate_local_href_path(long_path) + self.assertEqual(result, long_path) + + def test_special_characters_in_data_uri(self): + special_data_uris = [ + "data:text/plain,Hello%20World%21", + "data:text/plain,<>&\"'", + 'data:application/json,{"key":"value"}', + ] + + for uri in special_data_uris: + with self.subTest(uri=uri): + result = validate_data_uri(uri) + self.assertEqual(result, uri) diff --git a/contentcuration/contentcuration/tests/utils/qti/test_html.py b/contentcuration/contentcuration/tests/utils/qti/test_html.py new file mode 100644 index 0000000000..dc5d162bc7 --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/qti/test_html.py @@ -0,0 +1,776 @@ +import unittest + +from contentcuration.utils.assessment.qti.base import TextNode +from contentcuration.utils.assessment.qti.html import A +from contentcuration.utils.assessment.qti.html import Abbr +from contentcuration.utils.assessment.qti.html import Address +from contentcuration.utils.assessment.qti.html import Article +from contentcuration.utils.assessment.qti.html import Aside +from contentcuration.utils.assessment.qti.html import Audio +from contentcuration.utils.assessment.qti.html import B +from contentcuration.utils.assessment.qti.html import Bdi +from contentcuration.utils.assessment.qti.html import Bdo +from contentcuration.utils.assessment.qti.html import BdoDir +from contentcuration.utils.assessment.qti.html import Blockquote +from contentcuration.utils.assessment.qti.html import Br +from contentcuration.utils.assessment.qti.html import Caption +from contentcuration.utils.assessment.qti.html import Cite +from contentcuration.utils.assessment.qti.html import Code +from contentcuration.utils.assessment.qti.html import Col +from contentcuration.utils.assessment.qti.html import Colgroup +from 
contentcuration.utils.assessment.qti.html import Dd +from contentcuration.utils.assessment.qti.html import Details +from contentcuration.utils.assessment.qti.html import Dfn +from contentcuration.utils.assessment.qti.html import Div +from contentcuration.utils.assessment.qti.html import Dl +from contentcuration.utils.assessment.qti.html import Dt +from contentcuration.utils.assessment.qti.html import Em +from contentcuration.utils.assessment.qti.html import Figcaption +from contentcuration.utils.assessment.qti.html import Figure +from contentcuration.utils.assessment.qti.html import Footer +from contentcuration.utils.assessment.qti.html import H1 +from contentcuration.utils.assessment.qti.html import H2 +from contentcuration.utils.assessment.qti.html import H3 +from contentcuration.utils.assessment.qti.html import H4 +from contentcuration.utils.assessment.qti.html import H5 +from contentcuration.utils.assessment.qti.html import H6 +from contentcuration.utils.assessment.qti.html import Header +from contentcuration.utils.assessment.qti.html import Hr +from contentcuration.utils.assessment.qti.html import HTMLElement +from contentcuration.utils.assessment.qti.html import I +from contentcuration.utils.assessment.qti.html import Img +from contentcuration.utils.assessment.qti.html import Kbd +from contentcuration.utils.assessment.qti.html import Label +from contentcuration.utils.assessment.qti.html import Li +from contentcuration.utils.assessment.qti.html import Nav +from contentcuration.utils.assessment.qti.html import Object +from contentcuration.utils.assessment.qti.html import Ol +from contentcuration.utils.assessment.qti.html import OlType +from contentcuration.utils.assessment.qti.html import P +from contentcuration.utils.assessment.qti.html import Param +from contentcuration.utils.assessment.qti.html import Picture +from contentcuration.utils.assessment.qti.html import Pre +from contentcuration.utils.assessment.qti.html import Q +from contentcuration.utils.assessment.qti.html import Rp +from contentcuration.utils.assessment.qti.html import Rt +from contentcuration.utils.assessment.qti.html import Ruby +from contentcuration.utils.assessment.qti.html import Samp +from contentcuration.utils.assessment.qti.html import Section +from contentcuration.utils.assessment.qti.html import Small +from contentcuration.utils.assessment.qti.html import Source +from contentcuration.utils.assessment.qti.html import Span +from contentcuration.utils.assessment.qti.html import Strong +from contentcuration.utils.assessment.qti.html import Sub +from contentcuration.utils.assessment.qti.html import Summary +from contentcuration.utils.assessment.qti.html import Sup +from contentcuration.utils.assessment.qti.html import Table +from contentcuration.utils.assessment.qti.html import TBody +from contentcuration.utils.assessment.qti.html import Td +from contentcuration.utils.assessment.qti.html import TFoot +from contentcuration.utils.assessment.qti.html import Th +from contentcuration.utils.assessment.qti.html import THead +from contentcuration.utils.assessment.qti.html import Tr +from contentcuration.utils.assessment.qti.html import Track +from contentcuration.utils.assessment.qti.html import TrackKind +from contentcuration.utils.assessment.qti.html import Ul +from contentcuration.utils.assessment.qti.html import Var +from contentcuration.utils.assessment.qti.html import Video + + +class HTMLDataClassTests(unittest.TestCase): + def test_break_elements(self): + br_element = Br() + 
self.assertEqual(br_element.to_xml_string(), "<br/>")
+
+        hr_element = Hr()
+        self.assertEqual(hr_element.to_xml_string(), "<hr/>")
+
+    def test_display_elements(self):
+        label_element = Label(children=["Test Label"], for_="test")
+        self.assertEqual(
+            label_element.to_xml_string(), '<label for="test">Test Label</label>'
+        )
+
+        summary_element = Summary(children=["Test Summary"])
+        self.assertEqual(
+            summary_element.to_xml_string(), "<summary>Test Summary</summary>"
+        )
+
+        figcaption_element = Figcaption(children=["Test Figcaption"])
+        self.assertEqual(
+            figcaption_element.to_xml_string(),
+            "<figcaption>Test Figcaption</figcaption>",
+        )
+
+    def test_details_validation(self):
+        summary_element = Summary(children=["Test Summary"])
+
+        # Valid case: Summary as first child
+        valid_details = Details(children=[summary_element, "Test Content"])
+        self.assertEqual(
+            valid_details.to_xml_string(),
+            "<details><summary>Test Summary</summary>Test Content</details>",
+        )
+
+        # Invalid case: No Summary element
+        with self.assertRaises(ValueError):
+            Details(children=["Test Content"])
+
+        # Invalid case: Summary not as first child
+        with self.assertRaises(ValueError):
+            Details(children=["Test Content", summary_element])
+
+        # Invalid case: Multiple Summary elements
+        second_summary = Summary(children=["Second Summary"])
+        with self.assertRaises(ValueError):
+            Details(children=[summary_element, "Test Content", second_summary])
+
+    def test_figure_elements(self):
+        figure_element = Figure(children=["Test Figure"])
+        self.assertEqual(figure_element.to_xml_string(), "<figure>Test Figure</figure>")
+
+        figcaption_element = Figcaption(children=["Test Caption"])
+        figure_with_caption = Figure(children=[figcaption_element, "Test Content"])
+        self.assertEqual(
+            figure_with_caption.to_xml_string(),
+            "<figure><figcaption>Test Caption</figcaption>Test Content</figure>",
+        )
+
+        figure_with_caption_last = Figure(children=["Test Content", figcaption_element])
+        self.assertEqual(
+            figure_with_caption_last.to_xml_string(),
+            "<figure>Test Content<figcaption>Test Caption</figcaption></figure>",
+        )
+
+        with self.assertRaises(ValueError):
+            Figure(
+                children=[figcaption_element, Figcaption(children=["Second Caption"])]
+            )
+
+        with self.assertRaises(ValueError):
+            Figure(children=["Before", figcaption_element, "After"])
+
+    def test_embed_elements(self):
+        img_element = Img(alt="Test Alt", src="test.jpg")
+        self.assertEqual(
+            img_element.to_xml_string(), '<img alt="Test Alt" src="test.jpg"/>'
+        )
+
+        param_element = Param(name="test_param", value="test_value")
+        self.assertEqual(
+            param_element.to_xml_string(),
+            '<param name="test_param" value="test_value"/>',
+        )
+
+        object_element = Object(children=["Test Object"], params=[param_element])
+        self.assertEqual(
+            object_element.to_xml_string(),
+            '<object>Test Object<param name="test_param" value="test_value"/></object>',
+        )
+
+        picture_source_element = Source(srcset="test.jpg 2x")
+        self.assertEqual(
+            picture_source_element.to_xml_string(), '<source srcset="test.jpg 2x"/>'
+        )
+
+        picture_element = Picture(children=[picture_source_element], img=img_element)
+        self.assertEqual(
+            picture_element.to_xml_string(),
+            '<picture><source srcset="test.jpg 2x"/><img alt="Test Alt" src="test.jpg"/></picture>',
+        )
+
+    def test_flow_elements(self):
+        blockquote_element = Blockquote(
+            children=["Test Blockquote"], cite="http://test.com"
+        )
+        self.assertEqual(
+            blockquote_element.to_xml_string(),
+            '<blockquote cite="http://test.com">Test Blockquote</blockquote>',
+        )
+
+        div_element = Div(children=["Test Div"])
+        self.assertEqual(div_element.to_xml_string(), "<div>Test Div</div>")
+
+        article_element = Article(children=["Test Article"])
+        self.assertEqual(
+            article_element.to_xml_string(), "<article>Test Article</article>"
+        )
+
+        section_element = Section(children=["Test Section"])
+        self.assertEqual(
+            section_element.to_xml_string(), "<section>Test Section</section>"
+        )
+
+        nav_element = Nav(children=["Test Nav"])
+        self.assertEqual(nav_element.to_xml_string(), "<nav>Test Nav</nav>")
+
+        aside_element = Aside(children=["Test Aside"])
+        self.assertEqual(aside_element.to_xml_string(), "<aside>Test Aside</aside>")
+
+        header_element = Header(children=["Test Header"])
+        self.assertEqual(header_element.to_xml_string(), "<header>Test Header</header>")
+
+        footer_element = Footer(children=["Test Footer"])
+        self.assertEqual(footer_element.to_xml_string(), "<footer>Test Footer</footer>")
+
+        address_element = Address(children=["Test Address"])
+        self.assertEqual(
+            address_element.to_xml_string(), "<address>Test Address</address>"
+        )
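# (Annotation, not part of the original patch.) Every test in this class uses
# the same construct-then-serialize pattern. A minimal sketch, assuming only
# the API visible in this diff (children lists in, an XML string out):
#
#     fragment = Div(children=[P(children=["Hello"])])
#     fragment.to_xml_string()  # -> "<div><p>Hello</p></div>"
#
# HTMLElement.from_html_string(), exercised by TestHTMLStringIntegration
# further below, is the assumed inverse: it parses an HTML string back into
# these element objects, so serialize/parse round-trips can be asserted.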
" + ) + + def test_media_elements(self): + track_element = Track(src="test.vtt", kind=TrackKind.SUBTITLES) + self.assertEqual( + track_element.to_xml_string(), '' + ) + + media_source_element = Source(src="test.mp4") + self.assertEqual( + media_source_element.to_xml_string(), '' + ) + + audio_element = Audio(children=["Test Audio"], src="test.mp3") + self.assertEqual( + audio_element.to_xml_string(), + '', + ) + + video_element = Video(children=["Test Video"], src="test.mp4") + self.assertEqual( + video_element.to_xml_string(), + '', + ) + + def test_sequence_elements(self): + li_element = Li(children=["Test Li"]) + self.assertEqual(li_element.to_xml_string(), "
  • Test Li
  • ") + + ol_element = Ol(children=[li_element], type=OlType.NUMBERS) + self.assertEqual( + ol_element.to_xml_string(), '
    1. Test Li
    ' + ) + + ul_element = Ul(children=[li_element]) + self.assertEqual(ul_element.to_xml_string(), "
    • Test Li
    ") + + dt_element = Dt(children=["Test Dt"]) + self.assertEqual(dt_element.to_xml_string(), "
    Test Dt
    ") + + dd_element = Dd(children=["Test Dd"]) + self.assertEqual(dd_element.to_xml_string(), "
    Test Dd
    ") + + dl_element = Dl(children=[dt_element, dd_element]) + self.assertEqual( + dl_element.to_xml_string(), "
    Test Dt
    Test Dd
    " + ) + + def test_table_elements(self): + caption_element = Caption(children=["Test Caption"]) + self.assertEqual( + caption_element.to_xml_string(), "Test Caption" + ) + + col_element = Col() + self.assertEqual(col_element.to_xml_string(), '') + + colgroup_element = Colgroup(children=[col_element]) + self.assertEqual( + colgroup_element.to_xml_string(), '' + ) + + td_element = Td(children=["Test Td"]) + self.assertEqual(td_element.to_xml_string(), "Test Td") + + th_element = Th(children=["Test Th"]) + self.assertEqual(th_element.to_xml_string(), "Test Th") + + tr_element = Tr(children=[th_element, td_element]) + self.assertEqual( + tr_element.to_xml_string(), "Test ThTest Td" + ) + + tbody_element = TBody(children=[tr_element]) + self.assertEqual( + tbody_element.to_xml_string(), + "Test ThTest Td", + ) + + thead_element = THead(children=[tr_element]) + self.assertEqual( + thead_element.to_xml_string(), + "Test ThTest Td", + ) + + tfoot_element = TFoot(children=[tr_element]) + self.assertEqual( + tfoot_element.to_xml_string(), + "Test ThTest Td", + ) + + table_element = Table( + children=[ + caption_element, + colgroup_element, + thead_element, + tbody_element, + tfoot_element, + ] + ) + expected_html = '
    Test Caption
    Test ThTest Td
    Test ThTest Td
    Test ThTest Td
    ' # noqa: E501 + self.assertEqual(table_element.to_xml_string(), expected_html) + + def test_text_elements(self): + a_element = A(children=["Test A"], href="file.html") + self.assertEqual(a_element.to_xml_string(), 'Test A') + + p_element = P(children=["Test P"]) + self.assertEqual(p_element.to_xml_string(), "

+        span_element = Span(children=["Test Span"])
+        self.assertEqual(span_element.to_xml_string(), "<span>Test Span</span>")
+
+        h1_element = H1(children=["Test H1"])
+        self.assertEqual(h1_element.to_xml_string(), "<h1>Test H1</h1>")
+
+        h2_element = H2(children=["Test H2"])
+        self.assertEqual(h2_element.to_xml_string(), "<h2>Test H2</h2>")
+
+        h3_element = H3(children=["Test H3"])
+        self.assertEqual(h3_element.to_xml_string(), "<h3>Test H3</h3>")
+
+        h4_element = H4(children=["Test H4"])
+        self.assertEqual(h4_element.to_xml_string(), "<h4>Test H4</h4>")
+
+        h5_element = H5(children=["Test H5"])
+        self.assertEqual(h5_element.to_xml_string(), "<h5>Test H5</h5>")
+
+        h6_element = H6(children=["Test H6"])
+        self.assertEqual(h6_element.to_xml_string(), "<h6>Test H6</h6>")
+
+        pre_element = Pre(children=["Test Pre"])
+        self.assertEqual(pre_element.to_xml_string(), "<pre>Test Pre</pre>")
+
+        em_element = Em(children=["Test Em"])
+        self.assertEqual(em_element.to_xml_string(), "<em>Test Em</em>")
+
+        code_element = Code(children=["Test Code"])
+        self.assertEqual(code_element.to_xml_string(), "<code>Test Code</code>")
+
+        kbd_element = Kbd(children=["Test Kbd"])
+        self.assertEqual(kbd_element.to_xml_string(), "<kbd>Test Kbd</kbd>")
+
+        i_element = I(children=["Test I"])
+        self.assertEqual(i_element.to_xml_string(), "<i>Test I</i>")
+
+        dfn_element = Dfn(children=["Test Dfn"])
+        self.assertEqual(dfn_element.to_xml_string(), "<dfn>Test Dfn</dfn>")
+
+        abbr_element = Abbr(children=["Test Abbr"])
+        self.assertEqual(abbr_element.to_xml_string(), "<abbr>Test Abbr</abbr>")
+
+        strong_element = Strong(children=["Test Strong"])
+        self.assertEqual(strong_element.to_xml_string(), "<strong>Test Strong</strong>")
+
+        sup_element = Sup(children=["Test Sup"])
+        self.assertEqual(sup_element.to_xml_string(), "<sup>Test Sup</sup>")
+
+        sub_element = Sub(children=["Test Sub"])
+        self.assertEqual(sub_element.to_xml_string(), "<sub>Test Sub</sub>")
+
+        var_element = Var(children=["Test Var"])
+        self.assertEqual(var_element.to_xml_string(), "<var>Test Var</var>")
+
+        small_element = Small(children=["Test Small"])
+        self.assertEqual(small_element.to_xml_string(), "<small>Test Small</small>")
+
+        samp_element = Samp(children=["Test Samp"])
+        self.assertEqual(samp_element.to_xml_string(), "<samp>Test Samp</samp>")
+
+        b_element = B(children=["Test B"])
+        self.assertEqual(b_element.to_xml_string(), "<b>Test B</b>")
+
+        cite_element = Cite(children=["Test Cite"])
+        self.assertEqual(cite_element.to_xml_string(), "<cite>Test Cite</cite>")
+
+        q_element = Q(children=["Test Q"])
+        self.assertEqual(q_element.to_xml_string(), "<q>Test Q</q>")
+
+        bdo_element = Bdo(dir=BdoDir.LTR, children=["Test Bdo"])
+        self.assertEqual(bdo_element.to_xml_string(), '<bdo dir="ltr">Test Bdo</bdo>')
+
+        bdi_element = Bdi(children=["Test Bdi"])
+        self.assertEqual(bdi_element.to_xml_string(), "<bdi>Test Bdi</bdi>")
+
+        rt_element = Rt(children=["Test Rt"])
+        self.assertEqual(rt_element.to_xml_string(), "<rt>Test Rt</rt>")
+
+        rp_element = Rp(text="(")
+        self.assertEqual(rp_element.to_xml_string(), "<rp>(</rp>")
+
+        ruby_element = Ruby(children=["Test Ruby"])
+        self.assertEqual(ruby_element.to_xml_string(), "<ruby>Test Ruby</ruby>")
+
+
+class TestHTMLStringIntegration(unittest.TestCase):
+    def test_complex_html_parsing(self):
+        complex_html = """
+    <div class="container" id="main">
+        <p>This is a <strong>complex</strong> paragraph with <em>emphasis</em> and a
+        <a href="file.html#anchor">link to example</a>.</p>
+        <img src="image.jpg" alt="Test image" width="300" height="200"/>
+    </div>
+    <ul>
+        <li>First <strong>bold</strong> item</li>
+        <li>Second item with <a href="page2.html">internal link</a></li>
+        <li>Third item</li>
+    </ul>
+    <ol>
+        <li>Numbered item one</li>
+        <li>Numbered item <em>two</em></li>
+    </ol>
+    <p>Final paragraph with <br/>
+    line break.</p>
    + """ + + # Parse the HTML + elements = HTMLElement.from_html_string(complex_html) + + # Should have 4 root elements: div, ul, ol, p + self.assertEqual( + len(elements), 4, f"Expected 4 root elements, got {len(elements)}" + ) + + # Test first element: div with complex content + div_element = elements[0] + self.assertIsInstance(div_element, Div) + self.assertEqual(div_element.class_, "container") + self.assertEqual(div_element.id_, "main") + + # Div should have 2 children: p and img + self.assertEqual(len(div_element.children), 2) + + # Test paragraph inside div + p_element = div_element.children[0] + self.assertIsInstance(p_element, P) + + # Paragraph should have mixed content: text, strong, text, em, text, a, text + p_children = p_element.children + self.assertEqual(len(p_children), 7) + + # Find and test the strong element + strong_element = p_children[1] + self.assertEqual(len(strong_element.children), 1) + self.assertIsInstance(strong_element.children[0], TextNode) + self.assertEqual(strong_element.children[0].text, "complex") + + # Find and test the em element + em_element = p_children[3] + self.assertEqual(len(em_element.children), 1) + self.assertEqual(em_element.children[0].text, "emphasis") + + # Find and test the link element + a_element = p_children[5] + self.assertEqual(str(a_element.href), "file.html#anchor") + self.assertEqual(len(a_element.children), 1) + self.assertEqual(a_element.children[0].text, "link to example") + + # Test image element + img_element = div_element.children[1] + self.assertIsInstance(img_element, Img) + self.assertEqual(str(img_element.src), "image.jpg") + self.assertEqual(img_element.alt, "Test image") + self.assertEqual(img_element.width, 300) + self.assertEqual(img_element.height, 200) + + # Test second element: unordered list + ul_element = elements[1] + self.assertIsInstance(ul_element, Ul) + self.assertEqual(len(ul_element.children), 3) + + # Test first list item + li1 = ul_element.children[0] + self.assertIsInstance(li1, Li) + li1_children = li1.children + # Should have: TextNode("First "), Strong("bold"), TextNode(" item") + self.assertEqual(len(li1_children), 3) + + # Find strong in first list item + li1_strong = li1_children[1] + self.assertEqual(li1_strong.children[0].text, "bold") + + # Test second list item with link + li2 = ul_element.children[1] + self.assertIsInstance(li2, Li) + li2_link = li2.children[1] + self.assertEqual(li2_link.href, "page2.html") + + # Test third element: ordered list + ol_element = elements[2] + self.assertIsInstance(ol_element, Ol) + self.assertEqual(len(ol_element.children), 2) + + # Test ordered list items + ol_li1 = ol_element.children[0] + self.assertIsInstance(ol_li1, Li) + + ol_li2 = ol_element.children[1] + self.assertIsInstance(ol_li2, Li) + ol_li2_em = ol_li2.children[1] + self.assertEqual(ol_li2_em.children[0].text, "two") + + # Test fourth element: paragraph with line break + final_p = elements[3] + self.assertIsInstance(final_p, P) + br_element = final_p.children[1] + self.assertIsInstance(br_element, Br) + + def test_simple_html_parsing(self): + """Test parsing simple HTML elements""" + + simple_html = "

<p>Hello <strong>world</strong>!</p>

    " + elements = HTMLElement.from_html_string(simple_html) + + self.assertEqual(len(elements), 1) + p = elements[0] + self.assertIsInstance(p, P) + self.assertEqual(len(p.children), 3) + + # Check strong element + strong = p.children[1] + self.assertIsInstance(strong, Strong) + self.assertEqual(strong.children[0].text, "world") + + def test_empty_and_self_closing_elements(self): + """Test parsing empty elements and self-closing tags""" + + html = """ +

+        <p></p>
+        <img src="test.jpg" alt="test"/>
+        <br/>
+        <div><span></span></div>
    + """ + + elements = HTMLElement.from_html_string(html) + self.assertEqual(len(elements), 4) + + # Empty paragraph + self.assertIsInstance(elements[0], P) + self.assertEqual(len(elements[0].children), 0) + + # Image with attributes + self.assertIsInstance(elements[1], Img) + self.assertEqual(elements[1].src, "test.jpg") + self.assertEqual(elements[1].alt, "test") + + # Line break + self.assertIsInstance(elements[2], Br) + + # Div with empty span + self.assertIsInstance(elements[3], Div) + self.assertEqual(len(elements[3].children), 1) + self.assertIsInstance(elements[3].children[0], Span) + self.assertEqual(len(elements[3].children[0].children), 0) + + def test_roundtrip_conversion(self): + """Test that HTML -> Pydantic -> XML maintains structure""" + + original_html = """ +

+            <p>Test <strong>bold</strong> and <em>italic</em> text.</p>
+            <div>
+            <ul>
+            <li>Item 1</li>
+            <li>Item 2</li>
+            </ul>
+            </div>
    + """ + + # Parse to Pydantic objects + elements = HTMLElement.from_html_string(original_html) + + # Convert back to XML strings + xml_output = "".join(elem.to_xml_string() for elem in elements) + + self.assertEqual( + "".join(m.strip() for m in original_html.split("\n")), xml_output.strip() + ) + + def test_attribute_type_conversion(self): + """Test that attributes are properly converted to correct types""" + + html = """ +
+            <div class="test-class" id="test-id">
+            <a href="file.html?query=test">Link</a>
+            <img src="image.png" alt="Alt text" width="100" height="50"/>
+            </div>
    + """ + + elements = HTMLElement.from_html_string(html) + div = elements[0] + + # Test div attributes + self.assertEqual(div.class_, "test-class") + self.assertEqual(div.id_, "test-id") + + # Test link attributes + a = div.children[0] + self.assertEqual(a.href, "file.html?query=test") + + # Test image attributes + img = div.children[1] + self.assertEqual(img.src, "image.png") + self.assertEqual(img.alt, "Alt text") + self.assertEqual(img.width, 100) + self.assertEqual(img.height, 50) + + +class TestFileDependencies(unittest.TestCase): + def test_img_src_dependencies(self): + img = Img(src="image.jpg", alt="Test image") + dependencies = img.get_file_dependencies() + self.assertEqual(dependencies, ["image.jpg"]) + + def test_img_srcset_dependencies(self): + img = Img( + src="fallback.jpg", + srcset="small.jpg 480w, medium.jpg 800w, large.jpg 1200w", + alt="Responsive image", + ) + dependencies = img.get_file_dependencies() + self.assertEqual( + set(dependencies), {"fallback.jpg", "small.jpg", "medium.jpg", "large.jpg"} + ) + + def test_img_srcset_with_density_descriptors(self): + img = Img( + src="image.jpg", + srcset="image.jpg 1x, image@2x.jpg 2x, image@3x.jpg 3x", + alt="High DPI image", + ) + dependencies = img.get_file_dependencies() + self.assertEqual( + set(dependencies), {"image.jpg", "image@2x.jpg", "image@3x.jpg"} + ) + + def test_a_href_dependencies(self): + a = A(href="document.pdf", children=["Download PDF"]) + dependencies = a.get_file_dependencies() + self.assertEqual(dependencies, ["document.pdf"]) + + def test_audio_src_dependencies(self): + audio = Audio(src="audio.mp3", children=["Audio not supported"]) + dependencies = audio.get_file_dependencies() + self.assertEqual(dependencies, ["audio.mp3"]) + + def test_video_src_dependencies(self): + video = Video(src="video.mp4", children=["Video not supported"]) + dependencies = video.get_file_dependencies() + self.assertEqual(dependencies, ["video.mp4"]) + + def test_source_src_dependencies(self): + source = Source(src="video.webm") + dependencies = source.get_file_dependencies() + self.assertEqual(dependencies, ["video.webm"]) + + def test_source_srcset_dependencies(self): + source = Source(srcset="banner-480.jpg 480w, banner-800.jpg 800w") + dependencies = source.get_file_dependencies() + self.assertEqual(set(dependencies), {"banner-480.jpg", "banner-800.jpg"}) + + def test_track_src_dependencies(self): + track = Track(src="subtitles.vtt", kind="subtitles") + dependencies = track.get_file_dependencies() + self.assertEqual(dependencies, ["subtitles.vtt"]) + + def test_blockquote_cite_dependencies(self): + blockquote = Blockquote( + cite="https://example.com/source.html", children=["Quote text"] + ) + dependencies = blockquote.get_file_dependencies() + # HttpUrl attributes are not included in file dependencies as they're external + self.assertEqual(dependencies, []) + + def test_nested_element_dependencies(self): + img = Img(src="nested.jpg", alt="Nested image") + link = A(href="page.html", children=["Link text"]) + div = Div(children=[img, link, "Some text"]) + + dependencies = div.get_file_dependencies() + self.assertEqual(set(dependencies), {"nested.jpg", "page.html"}) + + def test_complex_nested_dependencies(self): + # Create a complex structure with multiple file dependencies + img1 = Img(src="image1.jpg", alt="Image 1") + img2 = Img( + src="image2.png", + srcset="image2-small.png 480w, image2-large.png 1200w", + alt="Image 2", + ) + link = A(href="document.pdf", children=["Download"]) + audio = 
Audio(src="background.mp3", children=["Audio"]) + + source1 = Source(src="video.webm") + source2 = Source(src="video.mp4") + video = Video(children=[source1, source2, "Video not supported"]) + + root_div = Div(children=[img1, img2, link, audio, video]) + + dependencies = root_div.get_file_dependencies() + expected = [ + "image1.jpg", + "image2.png", + "image2-small.png", + "image2-large.png", + "document.pdf", + "background.mp3", + "video.webm", + "video.mp4", + ] + self.assertEqual(set(dependencies), set(expected)) + + def test_picture_element_dependencies(self): + source1 = Source(srcset="mobile.jpg 480w, tablet.jpg 800w") + source2 = Source(srcset="desktop.jpg 1200w") + img = Img(src="fallback.jpg", alt="Picture") + picture = Picture(children=[source1, source2], img=img) + + dependencies = picture.get_file_dependencies() + expected = ["mobile.jpg", "tablet.jpg", "desktop.jpg", "fallback.jpg"] + self.assertEqual(set(dependencies), set(expected)) + + def test_table_with_dependencies(self): + img_cell = Td(children=[Img(src="table-image.jpg", alt="Table image")]) + link_cell = Td(children=[A(href="table-link.html", children=["Link"])]) + row = Tr(children=[img_cell, link_cell]) + table = Table(children=[row]) + + dependencies = table.get_file_dependencies() + self.assertEqual(set(dependencies), {"table-image.jpg", "table-link.html"}) + + def test_no_dependencies(self): + p = P(children=["Just text content"]) + dependencies = p.get_file_dependencies() + self.assertEqual(dependencies, []) + + def test_empty_srcset(self): + # Test that empty srcset doesn't break anything + img = Img(src="image.jpg", alt="Image") + dependencies = img.get_file_dependencies() + self.assertEqual(dependencies, ["image.jpg"]) + + def test_duplicate_dependencies_removed(self): + # Test that duplicate file paths are only included once + img1 = Img(src="same.jpg", alt="Image 1") + img2 = Img(src="same.jpg", alt="Image 2") + div = Div(children=[img1, img2]) + + dependencies = div.get_file_dependencies() + self.assertEqual(dependencies, ["same.jpg"]) + + def test_mixed_srcset_formats(self): + # Test srcset with mixed width and density descriptors + img = Img( + src="base.jpg", + srcset="small.jpg 300w, medium.jpg 1.5x, large.jpg 2x", + alt="Mixed srcset", + ) + dependencies = img.get_file_dependencies() + self.assertEqual( + set(dependencies), {"base.jpg", "small.jpg", "medium.jpg", "large.jpg"} + ) diff --git a/contentcuration/contentcuration/tests/utils/qti/test_imsmanifest.py b/contentcuration/contentcuration/tests/utils/qti/test_imsmanifest.py new file mode 100644 index 0000000000..949b88ffdd --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/qti/test_imsmanifest.py @@ -0,0 +1,204 @@ +import unittest + +from contentcuration.utils.assessment.qti.imsmanifest import Dependency +from contentcuration.utils.assessment.qti.imsmanifest import File +from contentcuration.utils.assessment.qti.imsmanifest import Item +from contentcuration.utils.assessment.qti.imsmanifest import Manifest +from contentcuration.utils.assessment.qti.imsmanifest import Metadata +from contentcuration.utils.assessment.qti.imsmanifest import Organization +from contentcuration.utils.assessment.qti.imsmanifest import Organizations +from contentcuration.utils.assessment.qti.imsmanifest import Resource +from contentcuration.utils.assessment.qti.imsmanifest import Resources + + +class TestManifestXMLOutput(unittest.TestCase): + def test_metadata_to_xml_string(self): + metadata = Metadata(schema="test_schema", schemaversion="1.0") + expected_xml 
= "test_schema1.0" + self.assertEqual(metadata.to_xml_string(), expected_xml) + + metadata = Metadata() + expected_xml = "" + self.assertEqual(metadata.to_xml_string(), expected_xml) + + def test_item_to_xml_string(self): + item = Item(identifier="item1", identifierref="ref1") + expected_xml = '' + self.assertEqual(item.to_xml_string(), expected_xml) + + item = Item() + expected_xml = "" + self.assertEqual(item.to_xml_string(), expected_xml) + + def test_organization_to_xml_string(self): + item1 = Item(identifier="item1") + item2 = Item(identifier="item2") + organization = Organization( + identifier="org1", + structure="hierarchical", + title="Test Org", + item=[item1, item2], + ) + expected_xml = '' # noqa: E501 + self.assertEqual(organization.to_xml_string(), expected_xml) + + organization = Organization() + expected_xml = "" + self.assertEqual(organization.to_xml_string(), expected_xml) + + def test_organizations_to_xml_string(self): + org1 = Organization(identifier="org1") + org2 = Organization(identifier="org2") + organizations = Organizations(organizations=[org1, org2]) + expected_xml = '' + self.assertEqual(organizations.to_xml_string(), expected_xml) + organizations = Organizations() + expected_xml = "" + self.assertEqual(organizations.to_xml_string(), expected_xml) + + def test_file_to_xml_string(self): + file = File(href="test.html") + expected_xml = '' + self.assertEqual(file.to_xml_string(), expected_xml) + file = File() + expected_xml = "" + self.assertEqual(file.to_xml_string(), expected_xml) + + def test_resource_to_xml_string(self): + file1 = File(href="file1.html") + file2 = File(href="file2.html") + resource = Resource( + identifier="res1", type_="webcontent", href="res.zip", files=[file1, file2] + ) + expected_xml = '' + self.assertEqual(resource.to_xml_string(), expected_xml) + + resource = Resource(identifier="res1", type_="webcontent") + expected_xml = '' + self.assertEqual(resource.to_xml_string(), expected_xml) + + def test_resources_to_xml_string(self): + res1 = Resource(identifier="res1", type_="webcontent") + res2 = Resource(identifier="res2", type_="imscp") + resources = Resources(resources=[res1, res2]) + expected_xml = '' + self.assertEqual(resources.to_xml_string(), expected_xml) + resources = Resources() + expected_xml = "" + self.assertEqual(resources.to_xml_string(), expected_xml) + + def test_imsmanifest_to_xml_string(self): + metadata = Metadata(schema="test_schema", schemaversion="1.0") + organizations = Organizations(organizations=[Organization(identifier="org1")]) + resources = Resources( + resources=[Resource(identifier="res1", type_="webcontent")] + ) + manifest = Manifest( + identifier="manifest1", + version="1.0", + metadata=metadata, + organizations=organizations, + resources=resources, + ) + expected_xml = ( + "' # noqa: E501 + "test_schema1.0" + '' + '' + "" + ) + self.assertEqual(manifest.to_xml_string(), expected_xml) + + manifest = Manifest(identifier="democracy_manifest") + expected_xml = ( + '' + "" + "" + "" + "" + ) + self.assertEqual(manifest.to_xml_string(), expected_xml) + + def test_imsmanifest_full_integration(self): + manifest = Manifest( + identifier="level1-T1-test-entry", + version="1.0", + metadata=Metadata(schema="QTI Package", schemaversion="3.0.0"), + organizations=Organizations(), + resources=Resources( + resources=[ + Resource( + identifier="t1-test-entry-item1", + type_="imsqti_item_xmlv3p0", + href="items/choice-single-cardinality.xml", + files=[File(href="items/choice-single-cardinality.xml")], + 
dependencies=[Dependency(identifierref="image_resource_1")], + ), + Resource( + type_="webcontent", + identifier="image_resource_1", + href="items/images/badger.svg", + files=[File(href="items/images/badger.svg")], + ), + Resource( + identifier="t1-test-entry-item2", + type_="imsqti_item_xmlv3p0", + href="items/choice-multiple-cardinality.xml", + files=[File(href="items/choice-multiple-cardinality.xml")], + ), + Resource( + identifier="t1-test-entry-item3", + type_="imsqti_item_xmlv3p0", + href="items/text-entry.xml", + files=[File(href="items/text-entry.xml")], + ), + Resource( + identifier="t1-test-entry-item4", + type_="imsqti_item_xmlv3p0", + href="items/extended-text.xml", + files=[File(href="items/extended-text.xml")], + ), + Resource( + identifier="t1-test-entry", + type_="imsqti_test_xmlv3p0", + href="assessment.xml", + files=[File(href="assessment.xml")], + ), + ] + ), + ) + + expected_xml = ( + '' # noqa: E501 + "QTI Package3.0.0" + "" + "" + '' + '' + '' + "" + '' + '' + "" + '' + '' + "" + '' + '' + "" + '' + '' + "" + '' + '' + "" + "" + "" + ) + self.assertEqual(manifest.to_xml_string(), expected_xml) diff --git a/contentcuration/contentcuration/tests/utils/qti/test_mathml.py b/contentcuration/contentcuration/tests/utils/qti/test_mathml.py new file mode 100644 index 0000000000..0bace05336 --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/qti/test_mathml.py @@ -0,0 +1,1613 @@ +""" +This test suite was initially generated using Gemini 2.5 Pro Preview. +It was then manually refined to ensure correctness and completeness. +This was then supplemented with additional tests to cover missing edge cases +and validations using Claude Sonnet 4. + +Gemini prompt: +Please write a comprehensive test suite for this, assuming that everything defined +in these files can be imported from `contentcuration.utils.assessment.qti.mathml`. +I am more concerned with integration level testing - checking that appropriately +composed objects produce the correct MathML output when the to_xml_string method +is invoked, and that conversely, appropriate object structures are created +using the from_string method. + + +Claude prompt: +I have these files that define Pydantic objects for generating and validating MathML. +Here are my current tests for this. Please tell me what the tests cover well, and what is missing. +Formulate recommendations to supplement these tests, where testing conformance to the +MathML Core schema is most important, and testing specific quirks of the implementation is not at all important. +Where possible, generate a separate artifact for each separate additional set of tests, +so that I can choose which ones I want to include more easily. 
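+
+(For orientation: a minimal round-trip with these objects looks like the sketch
+below. It uses only names imported in this module; the serialized form shown is
+inferred from the assertions in this suite, not quoted from the implementation.)
+
+    frac = Mfrac(children=[Mi(children=["a"]), Mn(children=["b"])])
+    xml = frac.to_xml_string()            # "<mfrac><mi>a</mi><mn>b</mn></mfrac>"
+    (parsed,) = Mfrac.from_string(xml)    # from_string returns a list of root elements
+    assert parsed.to_xml_string() == xml  # structure survives the round-trip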
+""" +import unittest + +from pydantic import ValidationError + +from contentcuration.utils.assessment.qti.base import TextNode +from contentcuration.utils.assessment.qti.constants import Dir +from contentcuration.utils.assessment.qti.mathml import Annotation +from contentcuration.utils.assessment.qti.mathml import AnnotationXml +from contentcuration.utils.assessment.qti.mathml import Math +from contentcuration.utils.assessment.qti.mathml import MathMLDisplay +from contentcuration.utils.assessment.qti.mathml import MathMLElement +from contentcuration.utils.assessment.qti.mathml import MathMLForm +from contentcuration.utils.assessment.qti.mathml import Mfrac +from contentcuration.utils.assessment.qti.mathml import Mi +from contentcuration.utils.assessment.qti.mathml import Mn +from contentcuration.utils.assessment.qti.mathml import Mo +from contentcuration.utils.assessment.qti.mathml import Mrow +from contentcuration.utils.assessment.qti.mathml import Mspace +from contentcuration.utils.assessment.qti.mathml import Msubsup +from contentcuration.utils.assessment.qti.mathml import Mtable +from contentcuration.utils.assessment.qti.mathml import Mtd +from contentcuration.utils.assessment.qti.mathml import Mtr +from contentcuration.utils.assessment.qti.mathml import Semantics +from contentcuration.utils.assessment.qti.mathml.base import MathMLGroupingElement +from contentcuration.utils.assessment.qti.mathml.base import MathMLLayoutElement +from contentcuration.utils.assessment.qti.mathml.base import MathMLScriptElement +from contentcuration.utils.assessment.qti.mathml.base import MathMLTokenElement +from contentcuration.utils.assessment.qti.mathml.core import Merror +from contentcuration.utils.assessment.qti.mathml.core import Mmultiscripts +from contentcuration.utils.assessment.qti.mathml.core import Mover +from contentcuration.utils.assessment.qti.mathml.core import Mphantom +from contentcuration.utils.assessment.qti.mathml.core import Mprescripts +from contentcuration.utils.assessment.qti.mathml.core import Mroot +from contentcuration.utils.assessment.qti.mathml.core import Ms +from contentcuration.utils.assessment.qti.mathml.core import Msqrt +from contentcuration.utils.assessment.qti.mathml.core import Mstyle +from contentcuration.utils.assessment.qti.mathml.core import Msub +from contentcuration.utils.assessment.qti.mathml.core import Msup +from contentcuration.utils.assessment.qti.mathml.core import Mtext +from contentcuration.utils.assessment.qti.mathml.core import Munder +from contentcuration.utils.assessment.qti.mathml.core import Munderover + + +class TestFieldValidation(unittest.TestCase): + """Tests for field validation using the annotated types and enums.""" + + def test_length_percentage_valid_values(self): + valid_values = [ + "0", # unitless zero + "10px", # pixels + "2em", + "1.5em", # em units + "0.5rem", # rem units + "2pt", + "12pt", # points + "1in", + "2.5in", # inches + "1cm", + "10mm", # metric + "50%", + "100%", + "0%", + "150%", # percentages + "+10px", + "-5px", # signed values + "0.1vh", + "50vw", + "10vmin", + "20vmax", # viewport units + "1ch", + "2ex", # character units + ] + + for value in valid_values: + with self.subTest(value=value): + # Test on mathsize attribute + obj = Mi(mathsize=value, children=["x"]) + self.assertEqual(obj.mathsize, value) + + # Test on width attribute of Mspace + space_obj = Mspace(width=value) + self.assertEqual(space_obj.width, value) + + def test_length_percentage_invalid_values(self): + invalid_values = [ + "10", # number without 
unit (except 0) + "px", # unit without number + "10 px", # space in value + "10px ", # trailing space + " 10px", # leading space + "10px;", # invalid character + "10xyz", # invalid unit + "auto", # keyword values not allowed + "inherit", # keyword values not allowed + "", # empty string + "10px 20px", # multiple values + ] + + for value in invalid_values: + with self.subTest(value=value): + with self.assertRaises(ValidationError): + Mi(mathsize=value, children=["x"]) + + def test_color_value_valid_values(self): + valid_values = [ + "red", + "blue", + "green", + "black", + "white", # named colors + "transparent", + "currentColor", # special keywords + "#f00", + "#ff0000", + "#FF0000", # hex colors (3,6 chars) + "#ffff", + "#ffffffff", # hex with alpha (4,8 chars) + "rgb(255,0,0)", + "rgb(255, 0, 0)", # rgb function + "rgba(255,0,0,0.5)", + "rgba(255, 0, 0, 1)", # rgba function + "hsl(0,100%,50%)", + "hsl(0, 100%, 50%)", # hsl function + "hsla(0,100%,50%,0.5)", # hsla function + ] + + for value in valid_values: + with self.subTest(value=value): + obj = Mi(mathcolor=value, children=["x"]) + self.assertEqual(obj.mathcolor, value) + + def test_color_value_invalid_values(self): + """ + Note that we do not validate color names against a predefined list, + as this would require a comprehensive list of valid CSS color names. + Instead, we focus on the format of the color value. + We also do not validate that number values in rgb/rgba are within 0-255 range, + as CSS allows values outside this range (e.g., rgb(300, -50, 500)). + """ + invalid_values = [ + "#ff", # too short hex + "#fffffffff", # too long hex + "#gggggg", # invalid hex characters + "rgb()", # empty rgb + "hsl()", # empty hsl + "", # empty string + "rgb(255 0 0)", # space instead of comma (CSS4 syntax) + ] + + for value in invalid_values: + with self.subTest(value=value): + with self.assertRaises(ValidationError): + Mi(mathcolor=value, children=["x"]) + + def test_script_level_valid_values(self): + valid_values = [ + "0", + "1", + "2", + "-1", + "-2", # basic integers + "+1", + "+2", + "+10", # explicit positive + "-10", + "-100", # negative + ] + + for value in valid_values: + with self.subTest(value=value): + obj = Mi(scriptlevel=value, children=["x"]) + self.assertEqual(obj.scriptlevel, value) + + def test_script_level_invalid_values(self): + """Test invalid ScriptLevel values.""" + invalid_values = [ + "1.5", # decimal not allowed + "one", # word not allowed + "", # empty string + " 1", # leading space + "1 ", # trailing space + "++1", # double sign + "+-1", # mixed signs + ] + + for value in invalid_values: + with self.subTest(value=value): + with self.assertRaises(ValidationError): + Mi(scriptlevel=value, children=["x"]) + + def test_enum_validation(self): + """Test enum field validation.""" + # Valid enum values + math_obj = Math(display=MathMLDisplay.BLOCK, children=[]) + self.assertEqual(math_obj.display, MathMLDisplay.BLOCK) + + mo_obj = Mo(form=MathMLForm.INFIX, children=["+"]) + self.assertEqual(mo_obj.form, MathMLForm.INFIX) + + # Invalid enum values should raise ValidationError + with self.assertRaises(ValidationError): + Math(display="invalid_display", children=[]) + + with self.assertRaises(ValidationError): + Mo(form="invalid_form", children=["+"]) + + def test_boolean_attribute_validation(self): + """Test boolean attribute handling.""" + # Valid boolean values + mo_obj = Mo(fence=True, separator=False, children=["|"]) + self.assertTrue(mo_obj.fence) + self.assertFalse(mo_obj.separator) + + # Boolean attributes should 
accept actual booleans + mo_obj2 = Mo(stretchy=True, symmetric=False, children=["("]) + self.assertTrue(mo_obj2.stretchy) + self.assertFalse(mo_obj2.symmetric) + + +class TestElementConstraints(unittest.TestCase): + """Tests for MathML element structural constraints and children requirements.""" + + def test_token_elements_children_constraints(self): + """Test that token elements only accept TextType children.""" + text_node = "content" + math_element = Mi(children=["x"]) # Invalid child for token elements + + # Valid: token elements with TextType children + token_classes = [Mi, Mn, Mo, Mtext, Ms, Annotation] + + for token_class in token_classes: + with self.subTest(element=token_class.__name__): + # Valid: TextType children + element = token_class(children=[text_node]) + self.assertEqual(len(element.children), 1) + self.assertIsInstance(element.children[0], TextNode) + + # Invalid: MathML element children should fail + with self.assertRaises( + ValidationError, + msg=f"{token_class.__name__} should reject MathML element children", + ): + token_class(children=[math_element]) + + # Mspace should not have children (it's empty) + mspace = Mspace() + self.assertFalse( + hasattr(mspace, "children") or len(getattr(mspace, "children", [])) > 0 + ) + + def test_elements_with_exactly_two_children(self): + """Test elements that require exactly 2 children.""" + child1 = Mi(children=["a"]) + child2 = Mn(children=["1"]) + child3 = Mi(children=["b"]) + + # These elements should accept exactly 2 children + two_child_classes = [ + (Mfrac, "fraction"), + (Mroot, "root"), + (Msub, "subscript"), + (Msup, "superscript"), + (Munder, "under"), + (Mover, "over"), + ] + + for element_class, description in two_child_classes: + with self.subTest(element=element_class.__name__): + # Valid: exactly 2 children + element = element_class(children=[child1, child2]) + self.assertEqual( + len(element.children), + 2, + f"{description} element should have exactly 2 children", + ) + + # Invalid: 1 child should fail + with self.assertRaises( + ValidationError, msg=f"{description} should reject 1 child" + ): + element_class(children=[child1]) + + # Invalid: 3 children should fail + with self.assertRaises( + ValidationError, msg=f"{description} should reject 3 children" + ): + element_class(children=[child1, child2, child3]) + + def test_elements_with_exactly_three_children(self): + """Test elements that require exactly 3 children.""" + child1 = Mi(children=["base"]) + child2 = Mn(children=["sub"]) + child3 = Mn(children=["sup"]) + child4 = Mi(children=["extra"]) + + # These elements should accept exactly 3 children + three_child_classes = [ + (Msubsup, "subscript-superscript"), + (Munderover, "under-over"), + ] + + for element_class, description in three_child_classes: + with self.subTest(element=element_class.__name__): + # Valid: exactly 3 children + element = element_class(children=[child1, child2, child3]) + self.assertEqual( + len(element.children), + 3, + f"{description} element should have exactly 3 children", + ) + + # Invalid: 2 children should fail + with self.assertRaises( + ValidationError, msg=f"{description} should reject 2 children" + ): + element_class(children=[child1, child2]) + + # Invalid: 4 children should fail + with self.assertRaises( + ValidationError, msg=f"{description} should reject 4 children" + ): + element_class(children=[child1, child2, child3, child4]) + + def test_table_structure_constraints(self): + """Test table element structural requirements.""" + # Valid table structure + cell_content = 
Mi(children=["cell"]) + mtd = Mtd(children=[cell_content]) + self.assertEqual(len(mtd.children), 1) + + # Mtr should contain Mtd elements + mtr = Mtr(children=[mtd]) + self.assertEqual(len(mtr.children), 1) + self.assertIsInstance(mtr.children[0], Mtd) + + # Mtable should contain Mtr elements + mtable = Mtable(children=[mtr]) + self.assertEqual(len(mtable.children), 1) + self.assertIsInstance(mtable.children[0], Mtr) + + # Invalid: Mtr with non-Mtd children should fail + non_mtd_element = Mi(children=["invalid"]) + with self.assertRaises( + ValidationError, msg="Mtr should reject non-Mtd children" + ): + Mtr(children=[non_mtd_element]) + + # Invalid: Mtable with non-Mtr children should fail + non_mtr_element = Mtd(children=[cell_content]) + with self.assertRaises( + ValidationError, msg="Mtable should reject non-Mtr children" + ): + Mtable(children=[non_mtr_element]) + + def test_semantics_element_constraints(self): + """Test Semantics element structure.""" + # First child should be presentation content + presentation = Mi(children=["x"]) + annotation = Annotation(encoding="text/plain", children=["variable x"]) + annotation_xml = AnnotationXml( + encoding="application/mathml+xml", children=[presentation] + ) + + # Valid semantics structures + semantics1 = Semantics(children=[presentation, annotation]) + semantics2 = Semantics(children=[presentation, annotation_xml]) + semantics3 = Semantics(children=[presentation, annotation, annotation_xml]) + + self.assertEqual(len(semantics1.children), 2) + self.assertEqual(len(semantics2.children), 2) + self.assertEqual(len(semantics3.children), 3) + + # Invalid: Semantics with no children should fail + with self.assertRaises( + ValidationError, msg="Semantics should require at least one child" + ): + Semantics(children=[]) + + # Invalid: Semantics with only annotations (no presentation content) should fail + with self.assertRaises( + ValidationError, + msg="Semantics should require presentation content as first child", + ): + Semantics(children=[annotation]) + + def test_mmultiscripts_structure(self): + """Test Mmultiscripts element structure constraints.""" + base = Mi(children=["F"]) + sub1 = Mn(children=["1"]) + sup1 = Mn(children=["2"]) + + # Basic multiscripts structure + mmultiscripts = Mmultiscripts(children=[base, sub1, sup1]) + self.assertEqual(len(mmultiscripts.children), 3) + + # With prescripts + prescripts = Mprescripts() + pre_sub = Mn(children=["0"]) + pre_sup = Mn(children=["3"]) + + mmultiscripts_with_pre = Mmultiscripts( + children=[base, sub1, sup1, prescripts, pre_sub, pre_sup] + ) + self.assertEqual(len(mmultiscripts_with_pre.children), 6) + + def test_mmultiscripts_validation(self): + """Test Mmultiscripts validation rules.""" + base = Mi(children=["F"]) + sub1 = Mn(children=["1"]) + sup1 = Mn(children=["2"]) + sub2 = Mn(children=["3"]) + sup2 = Mn(children=["4"]) + prescripts = Mprescripts() + + # Test: Empty mmultiscripts should fail + with self.assertRaises( + ValidationError, msg="Empty mmultiscripts should be invalid" + ): + Mmultiscripts(children=[]) + + # Test: Odd number of scripts (without prescripts) should fail + with self.assertRaises( + ValidationError, msg="Odd number of scripts should be invalid" + ): + Mmultiscripts(children=[base, sub1]) # Missing superscript + + # Test: Scripts must come in pairs after base + with self.assertRaises(ValidationError, msg="Scripts must be paired"): + Mmultiscripts( + children=[base, sub1, sup1, sub2] + ) # Missing final superscript + + # Test: Post-scripts must be in pairs when 
prescripts present + with self.assertRaises(ValidationError, msg="Post-scripts must be paired"): + Mmultiscripts( + children=[base, sub1, prescripts, sub2, sup2] + ) # Odd post-scripts + + # Test: Pre-scripts must be in pairs when prescripts present + with self.assertRaises(ValidationError, msg="Pre-scripts must be paired"): + Mmultiscripts( + children=[base, sub1, sup1, prescripts, sub2] + ) # Odd pre-scripts + + # Test: Multiple prescripts should fail + with self.assertRaises( + ValidationError, msg="Multiple prescripts should be invalid" + ): + Mmultiscripts(children=[base, sub1, sup1, prescripts, prescripts]) + + # Test: Valid cases should pass + # Valid: Base only + Mmultiscripts(children=[base]) + + # Valid: Base with paired scripts + Mmultiscripts(children=[base, sub1, sup1]) + + # Valid: Base with multiple paired scripts + Mmultiscripts(children=[base, sub1, sup1, sub2, sup2]) + + # Valid: Base with prescripts and paired pre-scripts + Mmultiscripts(children=[base, prescripts, sub1, sup1]) + + # Valid: Base with post-scripts and pre-scripts + Mmultiscripts(children=[base, sub1, sup1, prescripts, sub2, sup2]) + + def test_empty_elements_validation(self): + """Test elements that can be empty vs those that cannot.""" + # Elements that can be empty + empty_allowed_classes = [ + (Mrow, "row"), + (Mstyle, "style"), + (Merror, "error"), + (Mphantom, "phantom"), + (Msqrt, "square root"), + (Math, "math root"), + ] + + for element_class, description in empty_allowed_classes: + with self.subTest(element=element_class.__name__): + element = element_class(children=[]) + self.assertEqual( + len(element.children), + 0, + f"{description} element should allow empty children", + ) + + # Mspace is inherently empty (no children attribute with content) + mspace = Mspace(width="1em", height="1em") + self.assertIsNotNone(mspace) + + def test_mixed_content_validation(self): + """Test elements that accept mixed content (text + elements).""" + text_before = "Before " + element = Mi(children=["x"]) + text_after = " after" + + # These elements should accept mixed content + mixed_content_classes = [ + (Mrow, "row"), + (Mstyle, "style"), + (Merror, "error"), + (Mphantom, "phantom"), + ] + + for element_class, description in mixed_content_classes: + with self.subTest(element=element_class.__name__): + mixed_element = element_class( + children=[text_before, element, text_after] + ) + self.assertEqual( + len(mixed_element.children), + 3, + f"{description} element should accept mixed content", + ) + self.assertIsInstance(mixed_element.children[0], TextNode) + self.assertIsInstance(mixed_element.children[1], Mi) + self.assertIsInstance(mixed_element.children[2], TextNode) + + def test_annotation_xml_element_name(self): + """Test that AnnotationXml serializes with correct element name.""" + annotation_xml = AnnotationXml(encoding="application/mathml+xml") + expected_name = "annotation-xml" + actual_name = annotation_xml.element_name() + self.assertEqual(actual_name, expected_name) + + def test_mtable_with_complex_structure(self): + """Test complex table structures.""" + # Create a 2x2 table + cell1 = Mtd(children=[Mi(children=["a"])]) + cell2 = Mtd(children=[Mn(children=["1"])]) + cell3 = Mtd(children=[Mi(children=["b"])]) + cell4 = Mtd(children=[Mn(children=["2"])]) + + row1 = Mtr(children=[cell1, cell2]) + row2 = Mtr(children=[cell3, cell4]) + + table = Mtable(children=[row1, row2]) + + self.assertEqual(len(table.children), 2) + self.assertEqual(len(table.children[0].children), 2) + 
self.assertEqual(len(table.children[1].children), 2) + + def test_element_inheritance_hierarchy(self): + """Test that elements inherit from correct base classes.""" + inheritance_tests = [ + (Mi(children=["x"]), MathMLTokenElement, "token"), + ( + Mfrac( + children=[ + Mi(children=["a"]), + Mn(children=["1"]), + ] + ), + MathMLLayoutElement, + "layout", + ), + ( + Msub( + children=[ + Mi(children=["x"]), + Mn(children=["1"]), + ] + ), + MathMLScriptElement, + "script", + ), + (Mstyle(children=[]), MathMLGroupingElement, "grouping"), + ] + + for element, expected_base, description in inheritance_tests: + with self.subTest( + element=type(element).__name__, base=expected_base.__name__ + ): + self.assertIsInstance( + element, + expected_base, + f"{type(element).__name__} should be a {description} element", + ) + + +class TestMathMLSerialization(unittest.TestCase): + """Tests for object -> to_xml_string() using direct string comparison.""" + + def test_simple_mi(self): + obj = Mi(children=["x"]) + xml_str = obj.to_xml_string() + expected_xml_str = "x" + self.assertEqual(xml_str, expected_xml_str) + + def test_simple_mn_with_attribute(self): + obj = Mn(children=["123"], dir_=Dir.RTL) + xml_str = obj.to_xml_string() + expected_xml_str = '123' + self.assertEqual(xml_str, expected_xml_str) + + def test_mo_with_boolean_attribute(self): + obj = Mo(children=["+"], fence=True, separator=False) + xml_str = obj.to_xml_string() + expected_xml_str = '+' + self.assertEqual(xml_str, expected_xml_str) + + def test_mi_with_enum_attribute(self): + obj = Mi(children=["X"]) + xml_str = obj.to_xml_string() + expected_xml_str = "X" + self.assertEqual(xml_str, expected_xml_str) + + def test_math_element_with_attributes(self): + obj = Math( + display=MathMLDisplay.BLOCK, + alttext="Equation", + children=[Mi(children=["y"])], + ) + xml_str = obj.to_xml_string() + expected_xml_str = 'y' + self.assertEqual(xml_str, expected_xml_str) + + def test_mrow_nested_elements(self): + obj = Mrow( + children=[ + Mi(children=["a"]), + Mo(children=["+"]), + Mn(children=["1"]), + ], + id_="eq1", + class_="equation-style", + ) + xml_str = obj.to_xml_string() + expected_xml_str = 'a+1' + self.assertEqual(xml_str, expected_xml_str) + + def test_mfrac(self): + obj = Mfrac( + children=[ + Mi( + children=["numerator"], + ), + Mn(children=["denominator"]), + ] + ) + xml_str = obj.to_xml_string() + expected_xml_str = "numeratordenominator" + self.assertEqual(xml_str, expected_xml_str) + + def test_msubsup(self): + obj = Msubsup( + children=[ + Mi(children=["X"]), + Mn(children=["s"]), + Mn(children=["p"]), + ] + ) + xml_str = obj.to_xml_string() + expected_xml_str = "Xsp" + self.assertEqual(xml_str, expected_xml_str) + + def test_mtable_mtr_mtd(self): + obj = Mtable( + children=[ + Mtr( + children=[ + Mtd( + children=[ + Mi( + children=["R1C1"], + ) + ] + ), + Mtd( + children=[ + Mi( + children=["R1C2"], + ) + ] + ), + ] + ), + Mtr( + children=[ + Mtd(children=[Mn(children=["1"])]), + Mtd(children=[Mn(children=["2"])]), + ] + ), + ] + ) + xml_str = obj.to_xml_string() + expected_xml_str = "R1C1R1C212" # noqa: E501 + self.assertEqual(xml_str, expected_xml_str) + + def test_mixed_content_serialization(self): + obj = Mrow( + children=[ + "TextBefore", + Mi(children=["x"]), + "TextBetween", + Mn(children=["123"]), + "TextAfter", + ] + ) + xml_str = obj.to_xml_string() + expected_xml_str = ( + "TextBeforexTextBetween123TextAfter" + ) + self.assertEqual(xml_str, expected_xml_str) + + def test_semantics_annotation(self): + obj = Semantics( + 
children=[ + Mi(children=["x"]), + Annotation( + encoding="text/plain", + children=["Content of annotation"], + ), + ] + ) + xml_str = obj.to_xml_string() + expected_xml_str = 'xContent of annotation' # noqa: E501 + self.assertEqual(xml_str, expected_xml_str) + + def test_annotation_xml(self): + obj = AnnotationXml( + encoding="application/mathml+xml", + children=[ + Mrow( + children=[ + Mi( + children=["alt"], + ), + Mo(children=["="]), + Mn(children=["1"]), + ] + ) + ], + ) + xml_str = obj.to_xml_string() + expected_xml_str = 'alt=1' # noqa: E501 + self.assertEqual(xml_str, expected_xml_str) + + +class TestMathMLDeserialization(unittest.TestCase): + """Tests for from_string() -> object""" + + def test_simple_mi_from_string(self): + xml_str = "y" + result = Mi.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, Mi) + self.assertEqual(len(obj.children), 1) + self.assertIsInstance(obj.children[0], TextNode) + self.assertEqual(obj.children[0].text, "y") + + def test_mo_from_string_with_attributes(self): + xml_str = '+ ' + result = Mo.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, Mo) + self.assertTrue(obj.fence) + self.assertEqual(obj.lspace, "8px") + self.assertEqual(obj.children[0].text, "+ ") + + def test_mrow_nested_from_string(self): + xml_str = ( + 'a+1' + ) + result = Mrow.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, Mrow) + self.assertEqual(obj.id_, "r1") + self.assertEqual(obj.class_, "test-class") + + self.assertEqual(len(obj.children), 3) + self.assertIsInstance(obj.children[0], Mi) + self.assertEqual(obj.children[0].children[0].text, "a") + self.assertIsInstance(obj.children[1], Mo) + self.assertEqual(obj.children[1].children[0].text, "+") + self.assertIsInstance(obj.children[2], Mn) + self.assertEqual(obj.children[2].children[0].text, "1") + + def test_mfrac_from_string(self): + xml_str = "ND" + result = Mfrac.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, Mfrac) + self.assertEqual(len(obj.children), 2) + self.assertIsInstance(obj.children[0], Mi) + self.assertEqual(obj.children[0].children[0].text, "N") + self.assertIsInstance(obj.children[1], Mn) + self.assertEqual(obj.children[1].children[0].text, "D") + + def test_mixed_content_deserialization(self): + xml_str = "Prefix v Infix 42 Suffix" + result = Mrow.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, Mrow) + + self.assertEqual(len(obj.children), 5) + self.assertIsInstance(obj.children[0], TextNode) + self.assertEqual(obj.children[0].text, "Prefix ") + self.assertIsInstance(obj.children[1], Mi) + self.assertEqual(obj.children[1].children[0].text, "v") + self.assertIsInstance(obj.children[2], TextNode) + self.assertEqual(obj.children[2].text, " Infix ") + self.assertIsInstance(obj.children[3], Mn) + self.assertEqual(obj.children[3].children[0].text, "42") + self.assertIsInstance(obj.children[4], TextNode) + self.assertEqual(obj.children[4].text, " Suffix") + + def test_semantics_annotation_from_string(self): + xml_str = ( + "" + " E" + ' E = mc^2' + "" + ) + result = Semantics.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, Semantics) + self.assertEqual(len(obj.children), 2) + + self.assertIsInstance(obj.children[0], Mi) + self.assertEqual(obj.children[0].children[0].text, "E") + + ann_obj = 
obj.children[1] + self.assertIsInstance(ann_obj, Annotation) + self.assertEqual(ann_obj.encoding, "text/latex") + self.assertEqual(len(ann_obj.children), 1) + self.assertIsInstance(ann_obj.children[0], TextNode) + self.assertEqual(ann_obj.children[0].text, "E = mc^2") + + def test_annotation_xml_from_string(self): + xml_str = ( + '' + " alt=0" + "" + ) + result = AnnotationXml.from_string(xml_str) + self.assertEqual(len(result), 1) + obj = result[0] + self.assertIsInstance(obj, AnnotationXml) + self.assertEqual(obj.encoding, "application/mathml+xml") + self.assertEqual(len(obj.children), 1) + mrow_child = obj.children[0] + self.assertIsInstance(mrow_child, Mrow) + self.assertEqual(len(mrow_child.children), 3) + self.assertIsInstance(mrow_child.children[0], Mi) + self.assertEqual(mrow_child.children[0].children[0].text, "alt") + + def test_from_string_multiple_root_elements(self): + xml_str = "a1" + result = MathMLElement.from_string(xml_str) + self.assertEqual(len(result), 2) + self.assertIsInstance(result[0], Mi) + self.assertEqual(result[0].children[0].text, "a") + self.assertIsInstance(result[1], Mn) + self.assertEqual(result[1].children[0].text, "1") + + +class TestErrorHandling(unittest.TestCase): + def test_from_string_invalid_xml(self): + xml_str = "x" + with self.assertRaisesRegex(ValueError, "Invalid Markup: mismatched tag"): + Mi.from_string(xml_str) + + def test_from_string_unregistered_tag(self): + xml_str = "content" + + with self.assertRaisesRegex( + ValueError, "No registered class found for tag: unregisteredtag" + ): + MathMLElement.from_string(xml_str) + + def test_attribute_validation_error_on_creation(self): + with self.assertRaises(ValueError): # Pydantic's ValidationError + Mi(mathvariant="not-a-valid-variant", children=["x"]) + + +class TestComplexMathematicalExpressions(unittest.TestCase): + """Tests for complex, realistic mathematical expressions.""" + + def test_quadratic_formula(self): + """Test the quadratic formula: x = (-b ± √(b²-4ac)) / 2a""" + # Create: x = (-b ± √(b²-4ac)) / 2a + + # Left side: x = + x = Mi(children=["x"]) + equals = Mo(children=["="]) + + # Right side numerator: -b ± √(b²-4ac) + minus_b = Mrow( + children=[ + Mo(children=["-"]), + Mi(children=["b"]), + ] + ) + + plus_minus = Mo(children=["±"]) + + # b²-4ac inside square root + b_squared = Msup( + children=[ + Mi(children=["b"]), + Mn(children=["2"]), + ] + ) + + four_ac = Mrow( + children=[ + Mn(children=["4"]), + Mi(children=["a"]), + Mi(children=["c"]), + ] + ) + + discriminant = Mrow(children=[b_squared, Mo(children=["-"]), four_ac]) + + sqrt_discriminant = Msqrt(children=[discriminant]) + + numerator = Mrow(children=[minus_b, plus_minus, sqrt_discriminant]) + + # Denominator: 2a + denominator = Mrow( + children=[ + Mn(children=["2"]), + Mi(children=["a"]), + ] + ) + + # Complete fraction + fraction = Mfrac(children=[numerator, denominator]) + + # Complete equation + equation = Mrow(children=[x, equals, fraction]) + + # Test serialization + xml_str = equation.to_xml_string() + self.assertIn("", xml_str) + self.assertIn("", xml_str) + self.assertIn("", xml_str) + + # Test round-trip + result = Mrow.from_string(xml_str) + self.assertEqual(len(result), 1) + self.assertIsInstance(result[0], Mrow) + + def test_integral_with_limits(self): + """Test definite integral: ∫₀^∞ e^(-x²) dx""" + + # Integral symbol with limits + integral_symbol = Mo(children=["∫"]) + lower_limit = Mn(children=["0"]) + upper_limit = Mo(children=["∞"]) + + integral_with_limits = Msubsup( + children=[integral_symbol, 
lower_limit, upper_limit] + ) + + # e^(-x²) + e = Mi(children=["e"]) + + # -x² + minus = Mo(children=["-"]) + x_squared = Msup( + children=[ + Mi(children=["x"]), + Mn(children=["2"]), + ] + ) + negative_x_squared = Mrow(children=[minus, x_squared]) + + # e^(-x²) + exponential = Msup(children=[e, negative_x_squared]) + + # dx + differential = Mrow( + children=[ + Mi(children=["d"]), + Mi(children=["x"]), + ] + ) + + # Complete integral + integral = Mrow(children=[integral_with_limits, exponential, differential]) + + # Test structure + xml_str = integral.to_xml_string() + self.assertIn("", xml_str) + self.assertIn("∫", xml_str) + self.assertIn("∞", xml_str) + + def test_matrix_expression(self): + """Test 2x2 matrix with expressions in cells.""" + + # Matrix elements + # Row 1: [cos θ, -sin θ] + cos_theta = Mrow( + children=[ + Mo(children=["cos"]), + Mi(children=["θ"]), + ] + ) + + minus_sin_theta = Mrow( + children=[ + Mo(children=["-"]), + Mo(children=["sin"]), + Mi(children=["θ"]), + ] + ) + + row1_cell1 = Mtd(children=[cos_theta]) + row1_cell2 = Mtd(children=[minus_sin_theta]) + row1 = Mtr(children=[row1_cell1, row1_cell2]) + + # Row 2: [sin θ, cos θ] + sin_theta = Mrow( + children=[ + Mo(children=["sin"]), + Mi(children=["θ"]), + ] + ) + + row2_cell1 = Mtd(children=[sin_theta]) + row2_cell2 = Mtd(children=[cos_theta]) + row2 = Mtr(children=[row2_cell1, row2_cell2]) + + # Complete matrix + matrix = Mtable(children=[row1, row2]) + + # Test structure + self.assertEqual(len(matrix.children), 2) + self.assertEqual(len(matrix.children[0].children), 2) + self.assertEqual(len(matrix.children[1].children), 2) + + def test_summation_with_complex_expression(self): + """Test summation: Σ(k=1 to n) k²/(k+1)""" + + # Summation symbol + sigma = Mo(children=["Σ"]) + + # Lower limit: k=1 + k_equals_1 = Mrow( + children=[ + Mi(children=["k"]), + Mo(children=["="]), + Mn(children=["1"]), + ] + ) + + # Upper limit: n + n = Mi(children=["n"]) + + # Summation with limits + summation = Munderover(children=[sigma, k_equals_1, n]) + + # Expression being summed: k²/(k+1) + k_squared = Msup( + children=[ + Mi(children=["k"]), + Mn(children=["2"]), + ] + ) + + k_plus_1 = Mrow( + children=[ + Mi(children=["k"]), + Mo(children=["+"]), + Mn(children=["1"]), + ] + ) + + fraction = Mfrac(children=[k_squared, k_plus_1]) + + # Complete expression + complete_sum = Mrow(children=[summation, fraction]) + + # Test serialization + xml_str = complete_sum.to_xml_string() + self.assertIn("", xml_str) + self.assertIn("Σ", xml_str) + self.assertIn("", xml_str) + + def test_chemical_equation(self): + """Test chemical equation: H₂ + ½O₂ → H₂O""" + + # H₂ + h2 = Mrow( + children=[ + Mi(children=["H"]), + Msub( + children=[ + Mrow(children=[]), # Empty base for subscript positioning + Mn(children=["2"]), + ] + ), + ] + ) + + # Plus sign + plus = Mo(children=["+"]) + + # ½O₂ + half = Mfrac( + children=[ + Mn(children=["1"]), + Mn(children=["2"]), + ] + ) + + o2 = Mrow( + children=[ + Mi(children=["O"]), + Msub(children=[Mrow(children=[]), Mn(children=["2"])]), + ] + ) + + half_o2 = Mrow(children=[half, o2]) + + # Arrow + arrow = Mo(children=["→"]) + + # H₂O + h2o = Mrow( + children=[ + Mi(children=["H"]), + Msub(children=[Mrow(children=[]), Mn(children=["2"])]), + Mi(children=["O"]), + ] + ) + + # Complete equation + equation = Mrow(children=[h2, plus, half_o2, arrow, h2o]) + + # Test structure + xml_str = equation.to_xml_string() + self.assertIn("→", xml_str) + self.assertIn("", xml_str) + self.assertIn("", xml_str) + + def 
test_nested_fractions(self): + """Test deeply nested fractions: (a/b) / (c/d) = ad/bc""" + + # a/b + a_over_b = Mfrac( + children=[ + Mi(children=["a"]), + Mi(children=["b"]), + ] + ) + + # c/d + c_over_d = Mfrac( + children=[ + Mi(children=["c"]), + Mi(children=["d"]), + ] + ) + + # (a/b) / (c/d) + complex_fraction = Mfrac(children=[a_over_b, c_over_d]) + + # = + equals = Mo(children=["="]) + + # ad + ad = Mrow( + children=[ + Mi(children=["a"]), + Mi(children=["d"]), + ] + ) + + # bc + bc = Mrow( + children=[ + Mi(children=["b"]), + Mi(children=["c"]), + ] + ) + + # ad/bc + result_fraction = Mfrac(children=[ad, bc]) + + # Complete equation + equation = Mrow(children=[complex_fraction, equals, result_fraction]) + + # Test nesting depth + xml_str = equation.to_xml_string() + # Should have nested mfrac elements + frac_count = xml_str.count("") + self.assertEqual(frac_count, 4) + + def test_multiscript_notation(self): + """Test multiscript notation: ₁₁²³⁵U²³⁸""" + + # Base element + u = Mi(children=["U"]) + + # Pre-subscripts and pre-superscripts + prescripts = Mprescripts() + + # Create multiscripts element + # Format: base, post-sub, post-sup, prescripts, pre-sub, pre-sup + multiscripts = Mmultiscripts( + children=[ + u, # base + Mn(children=["238"]), # post-subscript + Mrow(children=[]), # no post-superscript + prescripts, + Mn(children=["92"]), # pre-subscript (atomic number) + Mrow(children=[]), # no pre-superscript + ] + ) + + xml_str = multiscripts.to_xml_string() + self.assertIn("", xml_str) + self.assertIn("", xml_str) + + def test_equation_with_semantics(self): + """Test equation with semantic annotations.""" + + # E = mc² + e = Mi(children=["E"]) + equals = Mo(children=["="]) + m = Mi(children=["m"]) + c_squared = Msup( + children=[ + Mi(children=["c"]), + Mn(children=["2"]), + ] + ) + + equation = Mrow(children=[e, equals, m, c_squared]) + + # Add semantic annotation + latex_annotation = Annotation( + encoding="application/x-tex", children=["E = mc^2"] + ) + + text_annotation = Annotation( + encoding="text/plain", + children=["Einstein's mass-energy equivalence"], + ) + + semantics = Semantics(children=[equation, latex_annotation, text_annotation]) + + # Test structure + self.assertEqual(len(semantics.children), 3) + self.assertIsInstance(semantics.children[0], Mrow) + self.assertIsInstance(semantics.children[1], Annotation) + self.assertIsInstance(semantics.children[2], Annotation) + + def test_styled_expression(self): + """Test expression with styling applied.""" + + # Create expression: f(x) = x² + 1 + f = Mi(children=["f"]) + x_arg = Mi(children=["x"]) + function_call = Mrow( + children=[ + f, + Mo(children=["("]), + x_arg, + Mo(children=[")"]), + ] + ) + + equals = Mo(children=["="]) + + x_squared = Msup( + children=[ + Mi(children=["x"]), + Mn(children=["2"]), + ] + ) + + plus = Mo(children=["+"]) + one = Mn(children=["1"]) + + expression = Mrow(children=[x_squared, plus, one]) + + # Wrap in styled container + styled_expression = Mstyle( + mathcolor="blue", + mathsize="14pt", + children=[function_call, equals, expression], + ) + + # Test styling attributes + self.assertEqual(styled_expression.mathcolor, "blue") + self.assertEqual(styled_expression.mathsize, "14pt") + + +class TestEdgeCasesAndCompliance(unittest.TestCase): + """Tests for edge cases, boundary conditions, and MathML Core compliance.""" + + def test_unicode_content_handling(self): + """Test proper handling of Unicode mathematical symbols.""" + unicode_symbols = [ + "α", + "β", + "γ", + "π", + "∑", + "∫", + "∞", 
+ "≤", + "≥", + "≠", + "∂", + "∇", + "√", + "∈", + "∉", + "⊂", + "⊃", + "∪", + "∩", + "→", + ] + + for symbol in unicode_symbols: + with self.subTest(symbol=symbol): + # Test in Mi element + mi = Mi(children=[symbol]) + xml_str = mi.to_xml_string() + self.assertIn(symbol, xml_str) + + # Test round-trip + result = Mi.from_string(xml_str) + self.assertEqual(result[0].children[0].text, symbol) + + def test_empty_elements_compliance(self): + """Test MathML Core compliance for empty elements.""" + + # Elements that can be empty + empty_allowed = [ + Math(children=[]), + Mrow(children=[]), + Msqrt(children=[]), + Mstyle(children=[]), + Merror(children=[]), + Mphantom(children=[]), + ] + + for element in empty_allowed: + with self.subTest(element=type(element).__name__): + xml_str = element.to_xml_string() + # Should produce valid XML + self.assertTrue(xml_str.startswith("<")) + self.assertTrue(xml_str.endswith(">")) + + def test_whitespace_handling(self): + """Test proper whitespace handling in text content.""" + + # Leading/trailing whitespace in text content + text_with_spaces = " x " + mi = Mi(children=[text_with_spaces]) + xml_str = mi.to_xml_string() + + # Round-trip test + result = Mi.from_string(xml_str) + self.assertEqual(result[0].children[0].text, text_with_spaces) + + # Mixed whitespace in Mtext + text_content = "This is\tsome\ntext with\r\nvarious whitespace" + mtext = Mtext(children=[text_content]) + xml_str = mtext.to_xml_string() + + result = Mtext.from_string(xml_str) + self.assertEqual(result[0].children[0].text, text_content.replace("\r", "")) + + def test_special_characters_in_content(self): + """Test handling of XML special characters in content.""" + + special_chars = ["&", "<", ">", '"', "'"] + + for char in special_chars: + with self.subTest(char=char): + mtext = Mtext(children=[f"Before{char}After"]) + xml_str = mtext.to_xml_string() + + # Should not contain unescaped special characters + if char == "&": + self.assertIn("&", xml_str) + elif char == "<": + self.assertIn("<", xml_str) + elif char == ">": + self.assertIn(">", xml_str) + + # Round-trip should preserve original content + result = Mtext.from_string(xml_str) + self.assertEqual(result[0].children[0].text, f"Before{char}After") + + def test_display_attribute_compliance(self): + """Test Math element display attribute compliance.""" + + # Test both valid display values + for display_value in [MathMLDisplay.BLOCK, MathMLDisplay.INLINE]: + with self.subTest(display=display_value): + math = Math(display=display_value, children=[]) + xml_str = math.to_xml_string() + self.assertIn(f'display="{display_value.value}"', xml_str) + + def test_length_percentage_edge_cases(self): + """Test edge cases for length-percentage values.""" + + # Edge cases that should be valid + valid_edge_cases = [ + "0", # Unitless zero + "0px", # Zero with unit + "+0", # Explicit positive zero + "-0", # Negative zero + "0.0px", # Decimal zero + ".5em", # Leading decimal point + "100%", # Full percentage + "0%", # Zero percentage + "+50%", # Explicit positive percentage + ] + + for value in valid_edge_cases: + with self.subTest(value=value): + try: + mspace = Mspace(width=value) + self.assertEqual(mspace.width, value) + except ValidationError: + self.fail(f"Valid edge case {value} was rejected") + + def test_extremely_long_content(self): + """Test handling of very long text content.""" + + # Create very long text content + long_text = "x" * 10000 + mtext = Mtext(children=[long_text]) + + # Should handle without issues + xml_str = 
mtext.to_xml_string() + self.assertIn(long_text, xml_str) + + # Round-trip test + result = Mtext.from_string(xml_str) + self.assertEqual(result[0].children[0].text, long_text) + + def test_deeply_nested_structures(self): + """Test deeply nested element structures.""" + + # Create deeply nested structure: ((((x)))) + content = Mi(children=["x"]) + + # Nest 10 levels deep + for i in range(10): + content = Mrow(children=[content]) + + # Should serialize without issues + xml_str = content.to_xml_string() + + # Count nesting depth + open_count = xml_str.count("") + close_count = xml_str.count("") + self.assertEqual(open_count, 10) + self.assertEqual(close_count, 10) + + def test_mixed_content_edge_cases(self): + """Test edge cases in mixed content.""" + + # Empty text nodes mixed with elements + mrow = Mrow( + children=[ + "", + Mi(children=["x"]), + "", + Mo(children=["+"]), + "", + Mn(children=["1"]), + ] + ) + + xml_str = mrow.to_xml_string() + + # Should strip empty text nodes + result = Mrow.from_string(xml_str) + self.assertEqual(len(result[0].children), 3) + + def test_attribute_value_edge_cases(self): + """Test edge cases for attribute values.""" + + # Very long attribute values + long_alttext = "A" * 1000 + math = Math(alttext=long_alttext, children=[]) + xml_str = math.to_xml_string() + self.assertIn(long_alttext, xml_str) + + # Attribute values with special characters + special_alttext = 'Text with "quotes" and &ersands' + math = Math(alttext=special_alttext, children=[]) + xml_str = math.to_xml_string() + + # Should properly escape in XML + result = Math.from_string(xml_str) + self.assertEqual(result[0].alttext, special_alttext) + + def test_script_element_edge_cases(self): + """Test edge cases for script elements.""" + + # Script elements with minimal content + base = Mi(children=["x"]) + empty_script = Mi(children=[""]) + + msub = Msub(children=[base, empty_script]) + xml_str = msub.to_xml_string() + + # Should handle empty script content + result = Msub.from_string(xml_str) + self.assertEqual(len(result[0].children), 2) + + def test_namespace_compliance(self): + """Test MathML namespace handling if supported.""" + + # Basic elements should work without explicit namespace in this implementation + mi = Mi(children=["x"]) + xml_str = mi.to_xml_string() + + # Should produce valid MathML-compatible XML + self.assertTrue(xml_str.startswith("")) + + def test_boolean_attribute_edge_cases(self): + """Test edge cases for boolean attributes.""" + + # Test all boolean attributes on Mo element + mo = Mo( + fence=True, + largeop=False, + movablelimits=True, + separator=False, + stretchy=True, + symmetric=False, + children=["∑"], + ) + + xml_str = mo.to_xml_string() + + # All boolean values should serialize + self.assertIn('fence="true"', xml_str) + self.assertIn('largeop="false"', xml_str) + self.assertIn('movablelimits="true"', xml_str) + self.assertIn('separator="false"', xml_str) + self.assertIn('stretchy="true"', xml_str) + self.assertIn('symmetric="false"', xml_str) + + def test_semantics_edge_cases(self): + """Test edge cases for semantic elements.""" + + # Semantics with only presentation content (no annotations) + presentation = Mi(children=["E"]) + ann1 = Annotation(encoding="text/plain", children=["First"]) + semantics = Semantics(children=[presentation, ann1]) + + xml_str = semantics.to_xml_string() + result = Semantics.from_string(xml_str) + self.assertEqual(len(result[0].children), 2) + + # Multiple annotations of same type + ann2 = Annotation(encoding="text/plain", 
children=["Second"]) + + semantics_multi = Semantics(children=[presentation, ann1, ann2]) + xml_str = semantics_multi.to_xml_string() + self.assertEqual(xml_str.count("10") + + value_element_with_attributes = Value( + value="5", + field_identifier="part1", + base_type=BaseType.INTEGER, + ) + self.assertEqual( + value_element_with_attributes.to_xml_string(), + '5', + ) + + def test_correct_response_element(self): + correct_response_element = CorrectResponse( + value=[Value(value="A"), Value(value="B")] + ) + self.assertEqual( + correct_response_element.to_xml_string(), + "AB", + ) + + def test_response_declaration_element(self): + response_declaration_element = ResponseDeclaration( + identifier="RESPONSE_1", + cardinality=Cardinality.SINGLE, + base_type=BaseType.IDENTIFIER, + correct_response=CorrectResponse(value=[Value(value="choiceA")]), + ) + expected_xml = 'choiceA' # noqa: E501 + self.assertEqual(response_declaration_element.to_xml_string(), expected_xml) + + def test_outcome_declaration_element(self): + outcome_declaration_element = OutcomeDeclaration( + identifier="SCORE", + cardinality=Cardinality.SINGLE, + base_type=BaseType.FLOAT, + ) + expected_xml = '' + self.assertEqual(outcome_declaration_element.to_xml_string(), expected_xml) + + def test_response_processing_element(self): + response_processing_element = ResponseProcessing( + template="https://example.com/response_processing.xml" + ) + self.assertEqual( + response_processing_element.to_xml_string(), + '', + ) + + def test_assessment_item_element(self): + item_body = ItemBody(children=[P(children=["Test Item Body Content"])]) + assessment_item_element = AssessmentItem( + identifier="item_1", + title="Test Assessment Item", + language="en-US", + item_body=item_body, + ) + expected_xml = '

    Test Item Body Content

    ' # noqa: E501 + self.assertEqual(assessment_item_element.to_xml_string(), expected_xml) + + def test_prompt_element(self): + prompt_element = Prompt(children=["This is the prompt text."]) + self.assertEqual( + prompt_element.to_xml_string(), + "<qti-prompt>This is the prompt text.</qti-prompt>", + ) + + def test_simple_choice_element(self): + simple_choice_element = SimpleChoice( + identifier="choice1", children=["Choice 1"] + ) + self.assertEqual( + simple_choice_element.to_xml_string(), + '<qti-simple-choice identifier="choice1">Choice 1</qti-simple-choice>', + ) + + def test_choice_interaction_element(self): + choice1 = SimpleChoice(identifier="choice1", children=["Choice 1"]) + choice2 = SimpleChoice(identifier="choice2", children=["Choice 2"]) + choice_interaction_element = ChoiceInteraction( + answers=[choice1, choice2], + response_identifier="RESPONSE", + prompt=Prompt(children=["Select the correct answer."]), + ) + expected_xml = '<qti-choice-interaction response-identifier="RESPONSE"><qti-prompt>Select the correct answer.</qti-prompt><qti-simple-choice identifier="choice1">Choice 1</qti-simple-choice><qti-simple-choice identifier="choice2">Choice 2</qti-simple-choice></qti-choice-interaction>' # noqa: E501 + self.assertEqual(choice_interaction_element.to_xml_string(), expected_xml) + + def test_text_entry_interaction_element(self): + text_entry_interaction = TextEntryInteraction( + response_identifier="textEntry1", + expected_length=10, + placeholder_text="Enter your answer", + ) + expected_xml = '<qti-text-entry-interaction response-identifier="textEntry1" expected-length="10" placeholder-text="Enter your answer"/>' + self.assertEqual(text_entry_interaction.to_xml_string(), expected_xml) + + def test_extended_text_interaction_element(self): + extended_text_interaction = ExtendedTextInteraction( + response_identifier="extendedText1", + placeholder_text="Enter your essay here.", + prompt=Prompt(children=["What is truth?"]), + ) + expected_xml = '<qti-extended-text-interaction response-identifier="extendedText1" placeholder-text="Enter your essay here."><qti-prompt>What is truth?</qti-prompt></qti-extended-text-interaction>' # noqa: E501 + self.assertEqual(extended_text_interaction.to_xml_string(), expected_xml) diff --git a/contentcuration/contentcuration/tests/utils/test_automation_manager.py b/contentcuration/contentcuration/tests/utils/test_automation_manager.py index a01eaaa228..5f1833d6cf 100644 --- a/contentcuration/contentcuration/tests/utils/test_automation_manager.py +++ b/contentcuration/contentcuration/tests/utils/test_automation_manager.py @@ -1,5 +1,4 @@ import unittest -from unittest.mock import MagicMock from contentcuration.utils.automation_manager import AutomationManager @@ -11,30 +10,3 @@ def setUp(self): def test_creation(self): # Check if an instance of AutomationManager is created successfully self.assertIsInstance(self.automation_manager, AutomationManager) - - def test_generate_embedding(self): - text = "Some text that needs to be embedded" - # Mock the generate_embedding method of RecommendationsAdapter - # as the implementation is yet to be done - self.automation_manager.recommendations_backend_adapter.generate_embedding = MagicMock(return_value=[0.1, 0.2, 0.3]) - embedding_vector = self.automation_manager.generate_embedding(text) - self.assertIsNotNone(embedding_vector) - - def test_embedding_exists(self): - embedding_vector = [0.1, 0.2, 0.3] - # Currently no solid implementation exists for this - # So the embadding_exists function returns true anyways - exists = self.automation_manager.embedding_exists(embedding_vector) - self.assertTrue(exists) - - def test_load_recommendations(self): - embedding_vector = [0.1, 0.2, 0.3] - self.automation_manager.recommendations_backend_adapter.get_recommendations = MagicMock(return_value=["item1", "item2"]) - recommendations = self.automation_manager.load_recommendations(embedding_vector) - self.assertIsInstance(recommendations, list) - - def test_cache_embeddings(self): - embeddings_list = [[0.1, 0.2, 0.3]] - # Currently the function returns true anyways - success = 
self.automation_manager.cache_embeddings(embeddings_list) - self.assertTrue(success) diff --git a/contentcuration/contentcuration/tests/utils/test_cache.py b/contentcuration/contentcuration/tests/utils/test_cache.py index d16570648a..6eab570d0f 100644 --- a/contentcuration/contentcuration/tests/utils/test_cache.py +++ b/contentcuration/contentcuration/tests/utils/test_cache.py @@ -31,7 +31,9 @@ def test_size_key(self): self.assertEqual("abcdefghijklmnopqrstuvwxyz:value", self.helper.size_key) def test_modified_key(self): - self.assertEqual("abcdefghijklmnopqrstuvwxyz:modified", self.helper.modified_key) + self.assertEqual( + "abcdefghijklmnopqrstuvwxyz:modified", self.helper.modified_key + ) def test_cache_get(self): self.redis_client.hget.return_value = 123 @@ -42,11 +44,15 @@ def test_cache_get__not_redis(self): self.cache.client = mock.Mock() self.cache.get.return_value = 123 self.assertEqual(123, self.helper.cache_get("test_key")) - self.cache.get.assert_called_once_with("{}:{}".format(self.helper.hash_key, "test_key")) + self.cache.get.assert_called_once_with( + "{}:{}".format(self.helper.hash_key, "test_key") + ) def test_cache_set(self): self.helper.cache_set("test_key", 123) - self.redis_client.hset.assert_called_once_with(self.helper.hash_key, "test_key", 123) + self.redis_client.hset.assert_called_once_with( + self.helper.hash_key, "test_key", 123 + ) def test_cache_set__delete(self): self.helper.cache_set("test_key", None) @@ -55,28 +61,32 @@ def test_cache_set__delete(self): def test_cache_set__not_redis(self): self.cache.client = mock.Mock() self.helper.cache_set("test_key", 123) - self.cache.set.assert_called_once_with("{}:{}".format(self.helper.hash_key, "test_key"), 123) + self.cache.set.assert_called_once_with( + "{}:{}".format(self.helper.hash_key, "test_key"), 123 + ) def test_get_size(self): - with mock.patch.object(self.helper, 'cache_get') as cache_get: + with mock.patch.object(self.helper, "cache_get") as cache_get: cache_get.return_value = 123 self.assertEqual(123, self.helper.get_size()) cache_get.assert_called_once_with(self.helper.size_key) def test_set_size(self): - with mock.patch.object(self.helper, 'cache_set') as cache_set: + with mock.patch.object(self.helper, "cache_set") as cache_set: self.helper.set_size(123) cache_set.assert_called_once_with(self.helper.size_key, 123) def test_get_modified(self): - with mock.patch.object(self.helper, 'cache_get') as cache_get: - cache_get.return_value = '2021-01-01 00:00:00' + with mock.patch.object(self.helper, "cache_get") as cache_get: + cache_get.return_value = "2021-01-01 00:00:00" modified = self.helper.get_modified() self.assertIsNotNone(modified) - self.assertEqual('2021-01-01T00:00:00', modified.isoformat()) + self.assertEqual("2021-01-01T00:00:00", modified.isoformat()) cache_get.assert_called_once_with(self.helper.modified_key) def test_set_modified(self): - with mock.patch.object(self.helper, 'cache_set') as cache_set: - self.helper.set_modified('2021-01-01 00:00:00') - cache_set.assert_called_once_with(self.helper.modified_key, '2021-01-01 00:00:00') + with mock.patch.object(self.helper, "cache_set") as cache_set: + self.helper.set_modified("2021-01-01 00:00:00") + cache_set.assert_called_once_with( + self.helper.modified_key, "2021-01-01 00:00:00" + ) diff --git a/contentcuration/contentcuration/tests/utils/test_cloud_storage.py b/contentcuration/contentcuration/tests/utils/test_cloud_storage.py index 3aade0d72a..5d84fd9f10 100644 --- a/contentcuration/contentcuration/tests/utils/test_cloud_storage.py +++ 
b/contentcuration/contentcuration/tests/utils/test_cloud_storage.py @@ -7,4 +7,4 @@ class CloudStorageTestCase(TestCase): def test_backend_initialization(self): cloud_storage_instance = CloudStorage() self.assertIsNotNone(cloud_storage_instance) - self.assertIsInstance(cloud_storage_instance.get_instance(), CloudStorage) + self.assertIsInstance(cloud_storage_instance, CloudStorage) diff --git a/contentcuration/contentcuration/tests/utils/test_exercise_creation.py b/contentcuration/contentcuration/tests/utils/test_exercise_creation.py new file mode 100644 index 0000000000..d9cd3add1b --- /dev/null +++ b/contentcuration/contentcuration/tests/utils/test_exercise_creation.py @@ -0,0 +1,2109 @@ +# flake8: noqa: E501 +# Ignore line length issues in this file +# Black will autoformat where possible, so this is not too egregious +# but will allow our long strings where necessary. +import json +import os +import re +import zipfile +from io import BytesIO +from uuid import uuid4 + +from django.core.files.storage import default_storage as storage +from le_utils.constants import content_kinds +from le_utils.constants import exercises +from le_utils.constants import file_formats +from le_utils.constants import format_presets + +from contentcuration.models import AssessmentItem +from contentcuration.models import ContentNode +from contentcuration.tests.base import StudioTestCase +from contentcuration.tests.testdata import fileobj_exercise_graphie +from contentcuration.tests.testdata import fileobj_exercise_image +from contentcuration.utils.assessment.perseus import PerseusExerciseGenerator +from contentcuration.utils.assessment.qti.archive import hex_to_qti_id +from contentcuration.utils.assessment.qti.archive import QTIExerciseGenerator + + +class TestPerseusExerciseCreation(StudioTestCase): + """ + Tests for the create_perseus_exercise function which handles exercise file generation. + + These tests verify that the function correctly packages assessment items, + images, and other resources into a valid Perseus exercise zip file. + + These tests were generated using Claude Sonnet 3.7 Extended thinking. + All tests, bar the image resizing tests, were then heavily edited to better fit the test + cases. The image resizing tests are committed here mostly unmodified. 
+ """ + + def setUp(self): + self.setUpBase() + + # Create an exercise node + self.exercise_node = ContentNode.objects.create( + title="Test Exercise", + node_id="1234567890abcdef1234567890abcded", + content_id="fedcba0987654321fedcba0987654321", + kind_id=content_kinds.EXERCISE, + parent=self.channel.main_tree, + extra_fields=json.dumps( + { + "randomize": True, + "options": { + "completion_criteria": { + "model": "mastery", + "threshold": { + "mastery_model": exercises.M_OF_N, + "m": 3, + "n": 5, + }, + } + }, + } + ), + ) + + def _create_assessment_item( + self, item_type, question_text, answers, hints=None, assessment_id=None + ): + """Helper to create assessment items with the right structure""" + if hints is None: + hints = [{"hint": "This is a hint", "order": 1}] + + item = AssessmentItem.objects.create( + contentnode=self.exercise_node, + assessment_id=assessment_id or uuid4().hex, + type=item_type, + question=question_text, + answers=json.dumps(answers), + hints=json.dumps(hints), + raw_data="{}", + order=len(self.exercise_node.assessment_items.all()) + 1, + randomize=True, + ) + return item + + def _create_perseus_zip(self, exercise_data): + generator = PerseusExerciseGenerator( + self.exercise_node, + exercise_data, + self.channel.id, + "en-US", + user_id=self.user.id, + ) + return generator.create_exercise_archive() + + def _validate_perseus_zip(self, exercise_file): + """Helper to validate the structure of the Perseus zip file""" + # Use Django's storage backend to read the file + with storage.open(exercise_file.file_on_disk.name, "rb") as f: + zip_data = f.read() + + zip_file = zipfile.ZipFile(BytesIO(zip_data)) + + # Check that the exercise.json file exists and is valid JSON + assert ( + "exercise.json" in zip_file.namelist() + ), "exercise.json not found in zip file" + exercise_data = json.loads(zip_file.read("exercise.json").decode("utf-8")) + + assert ( + "all_assessment_items" in exercise_data + ), "all_assessment_items missing in exercise data" + assert ( + "mastery_model" in exercise_data + ), "mastery_model missing in exercise data" + + # Check that each assessment item has a corresponding JSON file + for item_id in exercise_data["all_assessment_items"]: + assert ( + f"{item_id}.json" in zip_file.namelist() + ), f"JSON file for item {item_id} missing" + try: + item_json = json.loads(zip_file.read(f"{item_id}.json").decode("utf-8")) + except json.JSONDecodeError: + self.fail(f"Invalid JSON data for {item_id}") + + self.assertIn("question", item_json) + self.assertIn("answerArea", item_json) + self.assertIn("itemDataVersion", item_json) + self.assertIn("hints", item_json) + + # Return the zip object and data for additional assertions + return zip_file, exercise_data + + def test_basic_exercise_creation(self): + """Test the basic creation of a Perseus exercise with a single question""" + # Create a simple multiple choice question + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + "What is 2+2?", + [ + {"answer": "4", "correct": True, "order": 1}, + {"answer": "3", "correct": False, "order": 2}, + {"answer": "5", "correct": False, "order": 3}, + ], + assessment_id="1234567890abcdef1234567890abcdef", + ) + + # Create the exercise data structure + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 5, + "m": 3, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Call the function to create the Perseus exercise + 
self._create_perseus_zip(exercise_data) + + # Verify that a file was created for the node + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + self.assertIsNotNone(exercise_file) + self.assertEqual(exercise_file.file_format_id, file_formats.PERSEUS) + + # Validate the contents of the zip file + zip_file, parsed_data = self._validate_perseus_zip(exercise_file) + + # Verify specific content details + self.assertEqual(parsed_data["all_assessment_items"], [item.assessment_id]) + self.assertEqual(parsed_data["m"], 3) + self.assertEqual(parsed_data["n"], 5) + self.assertTrue(parsed_data["randomize"]) + + # Check that the assessment item file contains the expected content + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + self.assertIn("What is 2+2?", item_json["question"]["content"]) + answers = item_json["question"]["widgets"]["radio 1"]["options"]["choices"] + self.assertEqual(len(answers), 3) + self.assertTrue( + any(ans["content"] == "4" and ans["correct"] for ans in answers) + ) + + # Hard code the generated checksum for the file for this test. + # Only change this and the contents of this test if we have decided that + # we are deliberately changing the archive generation algorithm for perseus files. + self.assertEqual(exercise_file.checksum, "0ec7e964b466ebc76e81e175570e97f1") + + def test_multiple_images_index_mismatch_regression(self): + """Regression test for index mismatch bug in process_image_strings method. + + When content is modified inside the re.finditer loop, subsequent matches + point to invalid positions due to string length changes, resulting in + malformed image processing. + """ + # Create three image files - use mix of resized and non-resized images + # to trigger different replacement lengths + image1 = fileobj_exercise_image(size=(100, 100), color="red") + image2 = fileobj_exercise_image(size=(200, 200), color="blue") + image3 = fileobj_exercise_image(size=(300, 300), color="green") + + # Create URLs for all images + image1_url = exercises.CONTENT_STORAGE_FORMAT.format(image1.filename()) + image2_url = exercises.CONTENT_STORAGE_FORMAT.format(image2.filename()) + image3_url = exercises.CONTENT_STORAGE_FORMAT.format(image3.filename()) + + # Create question with multiple images - mix of resized and original + # This should create different length replacements + question_text = ( + f"First image (resized): ![img1]({image1_url} =50x50)\n" + f"Second image (original): ![img2]({image2_url})\n" + f"Third image (resized): ![img3]({image3_url} =70x70)" + ) + + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [{"answer": "Answer", "correct": True, "order": 1}], + ) + + # Associate all images with the assessment item + for img in [image1, image2, image3]: + img.assessment_item = item + img.save() + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Get the Perseus item JSON content + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + question_content = item_json["question"]["content"] + + # Extract all markdown image references using the same 
pattern as the code + markdown_pattern = r"!\[([^\]]*)\]\(([^)]+)\)" + matches = re.findall(markdown_pattern, question_content) + + # Check that we have exactly 3 well-formed image references + # If the bug exists, we might get malformed content due to index mismatch + self.assertEqual( + len(matches), + 3, + f"Expected 3 image references, found {len(matches)} in content: {question_content}", + ) + + # Verify each match has proper structure + for i, (alt_text, _) in enumerate(matches): + expected_alt = f"img{i+1}" + self.assertEqual( + alt_text, + expected_alt, + f"Image {i+1} alt text malformed: got '{alt_text}', expected '{expected_alt}'", + ) + + # Verify that width and height are properly included in the question images + question_images = item_json["question"]["images"] + + self.assertEqual( + len(question_images), + 2, + f"Expected 2 image entries with dimensions, found {len(question_images)}: {list(question_images.keys())}", + ) + + # Verify that we have images with the expected dimensions + for image_name, image_data in question_images.items(): + width, height = image_data["width"], image_data["height"] + if width == 50 and height != 50: + self.fail("Should find image with 50x50 dimensions") + elif width == 70 and height != 70: + self.fail("Should find image with 70x70 dimensions") + + def test_exercise_with_image(self): + image_file = fileobj_exercise_image() + + # Create a question with image + image_url = exercises.CONTENT_STORAGE_FORMAT.format(f"{image_file.filename()}") + question_text = f"Identify the shape: ![shape]({image_url})" + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [ + {"answer": "Circle", "correct": True, "order": 1}, + {"answer": "Square", "correct": False, "order": 2}, + ], + ) + + # Associate the image with the assessment item + image_file.assessment_item = item + image_file.save() + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 3, + "m": 2, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Check that the image file was included in the zip + image_path = f"images/{image_file.filename()}" + self.assertIn(image_path, zip_file.namelist()) + + # Check that the question references the correct image path + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + self.assertIn( + f"${exercises.IMG_PLACEHOLDER}/{image_path}", + item_json["question"]["content"], + ) + + def test_exercise_with_image_no_attached_file(self): + """Identical to the previous test, but fails to attach the file object to the assessment item""" + image_file = fileobj_exercise_image() + + # Create a question with image + image_url = exercises.CONTENT_STORAGE_FORMAT.format(f"{image_file.filename()}") + question_text = f"Identify the shape: ![shape]({image_url})" + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [ + {"answer": "Circle", "correct": True, "order": 1}, + {"answer": "Square", "correct": False, "order": 2}, + ], + ) + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 3, + "m": 2, + 
"all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Check that the image file was included in the zip + image_path = f"images/{image_file.filename()}" + self.assertIn(image_path, zip_file.namelist()) + + # Check that the question references the correct image path + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + self.assertIn( + f"${exercises.IMG_PLACEHOLDER}/{image_path}", + item_json["question"]["content"], + ) + + def test_exercise_with_image_deleted_file_object(self): + """Identical to the previous test, but deletes the file object""" + image_file = fileobj_exercise_image() + + # Create a question with image + image_url = exercises.CONTENT_STORAGE_FORMAT.format(f"{image_file.filename()}") + question_text = f"Identify the shape: ![shape]({image_url})" + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [ + {"answer": "Circle", "correct": True, "order": 1}, + {"answer": "Square", "correct": False, "order": 2}, + ], + ) + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 3, + "m": 2, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + image_file.delete() + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Check that the image file was included in the zip + image_path = f"images/{image_file.filename()}" + self.assertIn(image_path, zip_file.namelist()) + + # Check that the question references the correct image path + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + self.assertIn( + f"${exercises.IMG_PLACEHOLDER}/{image_path}", + item_json["question"]["content"], + ) + + def _create_perseus_item(self): + with open( + os.path.join( + os.path.dirname(__file__), "perseus_question_new_bar_graphs.json" + ) + ) as f: + perseus_json = f.read() + + graphie_ids = [ + "d855aefe9a722f9a794b0883ebcdb8c37b4ba0c7", + "95262ebaf42bdd1929e5d6d1e2853d3eb0a5cc74", + "ab207c6f38c887130b68c078e6158a87aab60c45", + ] + + graphie_files = [] + + for graphie_id in graphie_ids: + graphie_url = f"cdn.kastatic.org/ka-perseus-graphie/{graphie_id}" + + # Create a graphie file + graphie_file = fileobj_exercise_graphie(original_filename=graphie_id) + graphie_files.append(graphie_file) + + graphie_path = exercises.CONTENT_STORAGE_FORMAT.format(graphie_id) + + perseus_json = perseus_json.replace(graphie_url, graphie_path) + + item = AssessmentItem.objects.create( + contentnode=self.exercise_node, + assessment_id="fedcba0987654321fedcba0987654321", + type=exercises.PERSEUS_QUESTION, + raw_data=perseus_json, + order=len(self.exercise_node.assessment_items.all()) + 1, + randomize=True, + ) + + for graphie_file in graphie_files: + graphie_file.assessment_item = item + graphie_file.save() + + return item, graphie_files + + def test_exercise_with_graphie(self): + """Test creating an 
exercise with graphie files (SVG+JSON pairs)""" + + item, graphie_files = self._create_perseus_item() + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 3, + "m": 2, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.PERSEUS_QUESTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + with zip_file.open(f"{item.assessment_id}.json") as f: + processed_perseus_json = f.read().decode("utf-8") + + for graphie_file in graphie_files: + filename = graphie_file.original_filename + # Check that both SVG and JSON parts of the graphie were included + svg_path = f"images/{filename}.svg" + json_path = f"images/{filename}-data.json" + self.assertIn(svg_path, zip_file.namelist()) + self.assertIn(json_path, zip_file.namelist()) + + # Verify the content of the SVG and JSON files + svg_content = zip_file.read(svg_path).decode("utf-8") + json_content = zip_file.read(json_path).decode("utf-8") + self.assertIn("", svg_content) + self.assertIn("version", json_content) + + # The preceding $ here seems to have been unintended, as it was originally meant to be stripped out + # of the URL using exercises.CONTENT_STORAGE_REGEX. However, this is not used for URL replacement, + # and instead, we just do a replace using the CONTENT_STORAGE_PLACEHOLDER that does not have the preceding $ + # meaning that the resultant paths are preceded by $ and the IMG_PLACEHOLDER. + self.assertIn( + f"web+graphie://${exercises.IMG_PLACEHOLDER}/images/{filename}", + processed_perseus_json, + ) + + def test_formula_processing(self): + """Test that formulas are properly processed in exercises""" + # Create a question with LaTeX formulas + question_text = "Solve: $$\\frac{x}{2} = 3$$" + item = self._create_assessment_item( + exercises.INPUT_QUESTION, + question_text, + [{"answer": "6", "correct": True, "order": 1}], + ) + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.INPUT_QUESTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Check that the formula was properly processed + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + self.assertIn("$\\frac{x}{2} = 3$", item_json["question"]["content"]) + + def test_multiple_formula_processing(self): + """Test that formulas are properly processed in exercises""" + # Create a question with LaTeX formulas + question_text = "Solve: $$\\frac{x}{2} = 3$$ or maybe $$\\frac{y}{2} = 7$$" + item = self._create_assessment_item( + exercises.INPUT_QUESTION, + question_text, + [{"answer": "6", "correct": True, "order": 1}], + ) + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.INPUT_QUESTION}, + } + + # Create the Perseus 
exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Check that the formula was properly processed + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + self.assertIn( + "Solve: $\\frac{x}{2} = 3$ or maybe $\\frac{y}{2} = 7$", + item_json["question"]["content"], + ) + + def test_multiple_question_types(self): + """Test creating an exercise with multiple question types""" + # Create different types of questions + + image_file = fileobj_exercise_image() + image_url = exercises.CONTENT_STORAGE_FORMAT.format(f"{image_file.filename()}") + item1 = self._create_assessment_item( + exercises.SINGLE_SELECTION, + f"![2 + 2]({image_url})\nWhat is 2+2?", + [ + {"answer": "4", "correct": True, "order": 1}, + {"answer": "5", "correct": False, "order": 2}, + ], + assessment_id="1234567890abcdef1234567890abcdef", + ) + + image_file.assessment_item = item1 + image_file.save() + + item2 = self._create_assessment_item( + exercises.MULTIPLE_SELECTION, + "Select all prime numbers:", + [ + {"answer": "2", "correct": True, "order": 1}, + {"answer": "3", "correct": True, "order": 2}, + {"answer": "4", "correct": False, "order": 3}, + {"answer": "5", "correct": True, "order": 4}, + ], + assessment_id="2134567890abcdef1234567890abcdef", + ) + + item3 = self._create_assessment_item( + exercises.INPUT_QUESTION, + "What is the length in meters of the bar in the capital of France?", + [{"answer": "1", "order": 1}], + assessment_id="2314567890abcdef1234567890abcdef", + ) + + item4, _ = self._create_perseus_item() + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 3, + "m": 2, + "all_assessment_items": [ + item1.assessment_id, + item2.assessment_id, + item3.assessment_id, + item4.assessment_id, + ], + "assessment_mapping": { + item1.assessment_id: exercises.SINGLE_SELECTION, + item2.assessment_id: exercises.MULTIPLE_SELECTION, + item3.assessment_id: exercises.INPUT_QUESTION, + item4.assessment_id: exercises.PERSEUS_QUESTION, + }, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, parsed_data = self._validate_perseus_zip(exercise_file) + + # Check that all question files are included + self.assertIn(f"{item1.assessment_id}.json", zip_file.namelist()) + self.assertIn(f"{item2.assessment_id}.json", zip_file.namelist()) + self.assertIn(f"{item3.assessment_id}.json", zip_file.namelist()) + self.assertIn(f"{item4.assessment_id}.json", zip_file.namelist()) + + # Verify the exercise data + self.assertEqual(len(parsed_data["all_assessment_items"]), 4) + self.assertEqual( + parsed_data["assessment_mapping"][item1.assessment_id], + exercises.SINGLE_SELECTION, + ) + self.assertEqual( + parsed_data["assessment_mapping"][item2.assessment_id], + exercises.MULTIPLE_SELECTION, + ) + self.assertEqual( + parsed_data["assessment_mapping"][item3.assessment_id], + exercises.INPUT_QUESTION, + ) + self.assertEqual( + parsed_data["assessment_mapping"][item4.assessment_id], + exercises.PERSEUS_QUESTION, + ) + + # Check specifics of each question type + item1_json = json.loads( + zip_file.read(f"{item1.assessment_id}.json").decode("utf-8") + ) + 
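        # For reference, the assertions below assume roughly this Perseus item
        # shape (a sketch inferred from the assertions in this file, not an
        # authoritative Perseus schema):
        #
        #     {
        #         "question": {
        #             "content": "What is 2+2?",
        #             "images": {},
        #             "widgets": {
        #                 "radio 1": {
        #                     "options": {
        #                         "multipleSelect": False,
        #                         "choices": [
        #                             {"content": "4", "correct": True},
        #                         ],
        #                     },
        #                 },
        #             },
        #         },
        #         "answerArea": {},
        #         "itemDataVersion": {},
        #         "hints": [],
        #     }
        #
        # Input questions use a "numeric-input 1" widget instead of "radio 1".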
self.assertIn("What is 2+2?", item1_json["question"]["content"]) + self.assertFalse( + item1_json["question"]["widgets"]["radio 1"]["options"]["multipleSelect"] + ) + + item2_json = json.loads( + zip_file.read(f"{item2.assessment_id}.json").decode("utf-8") + ) + self.assertIn("Select all prime numbers:", item2_json["question"]["content"]) + self.assertTrue( + item2_json["question"]["widgets"]["radio 1"]["options"]["multipleSelect"] + ) + + item3_json = json.loads( + zip_file.read(f"{item3.assessment_id}.json").decode("utf-8") + ) + self.assertIn( + "What is the length in meters of the bar in the capital of France?", + item3_json["question"]["content"], + ) + self.assertEqual( + item3_json["question"]["widgets"]["numeric-input 1"]["options"]["answers"][ + 0 + ]["value"], + 1, + ) + # Hard code the generated checksum for the file for this test. + # Only change this and the contents of this test if we have decided that + # we are deliberately changing the archive generation algorithm for perseus files. + self.assertEqual(exercise_file.checksum, "94de065d485e52d56c3032074044e7c3") + + def test_image_key_full_path_regression(self): + """Regression test for image key containing full path in Perseus files. + + This test ensures that the 'images' object in Perseus JSON files uses the full path + as the key (${IMG_PLACEHOLDER}/images/filename.ext) rather than just the filename. + + Bug: The image key in the 'images' object was being set to just the filename + instead of the full path with IMG_PLACEHOLDER prefix. + """ + # Create an image file + image_file = fileobj_exercise_image() + + # Create a question with image that has dimensions (to trigger images object generation) + image_url = exercises.CONTENT_STORAGE_FORMAT.format(image_file.filename()) + question_text = f"Identify the shape: ![shape]({image_url} =100x100)" + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [ + {"answer": "Circle", "correct": True, "order": 1}, + {"answer": "Square", "correct": False, "order": 2}, + ], + ) + + # Associate the image with the assessment item + image_file.assessment_item = item + image_file.save() + + # Create the exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Verify that a file was created + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Get the Perseus item JSON content + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + + # The critical regression check: images object keys should contain full path + question_images = item_json["question"]["images"] + + # Should have exactly one image entry + self.assertEqual( + len(question_images), + 1, + f"Expected 1 image in images object, got {len(question_images)}: {list(question_images.keys())}", + ) + + # Get the image key from the images object + image_key = list(question_images.keys())[0] + + # The key should be the full path, not just the filename + expected_full_path = ( + f"${exercises.IMG_PLACEHOLDER}/images/{image_file.filename()}" + ) + self.assertEqual( + image_key, + expected_full_path, + f"Image key should be '{expected_full_path}' but got: '{image_key}'", + ) + + # Verify the image 
has the expected dimensions + image_data = question_images[image_key] + self.assertEqual(image_data["width"], 100) + self.assertEqual(image_data["height"], 100) + + def _test_image_resizing_in_field(self, field_type): + """ + Helper method to test image resizing in different fields (question, answer, hint) + + Args: + field_type: 'question', 'answer', or 'hint' + """ + # Create a base image file + base_image = fileobj_exercise_image(size=(400, 300), color="blue") + base_image_url = exercises.CONTENT_STORAGE_FORMAT.format(base_image.filename()) + + # Create scenarios for each field type + if field_type == "question": + # For questions, test multiple sizes of the same image + question_text = ( + f"First resized image: ![shape1]({base_image_url} =200x150)\n" + f"Second resized image (same): ![shape2]({base_image_url} =200x150)\n" + f"Third resized image (different): ![shape3]({base_image_url} =100x75)" + ) + answers = [{"answer": "Answer A", "correct": True, "order": 1}] + hints = [{"hint": "Hint text", "order": 1}] + + elif field_type == "answer": + # For answers, test across multiple answer options + question_text = "Select the correct description:" + answers = [ + { + "answer": f"This is a blue rectangle ![shape1]({base_image_url} =200x150)", + "correct": True, + "order": 1, + }, + { + "answer": f"This is a big blue rectangle ![shape2]({base_image_url} =200x150)", + "correct": False, + "order": 2, + }, + { + "answer": f"This is a small blue rectangle ![shape3]({base_image_url} =100x75)", + "correct": False, + "order": 3, + }, + ] + hints = [{"hint": "Hint text", "order": 1}] + + else: # hint + # For hints, test across multiple hints + question_text = "What shape is this?" + answers = [{"answer": "Rectangle", "correct": True, "order": 1}] + hints = [ + { + "hint": f"Look at the proportions ![shape1]({base_image_url} =200x150)", + "order": 1, + }, + { + "hint": f"It has four sides ![shape2]({base_image_url} =200x150)", + "order": 2, + }, + { + "hint": f"It's a small rectangle ![shape3]({base_image_url} =100x75)", + "order": 3, + }, + ] + + # Create the assessment item + item_type = exercises.SINGLE_SELECTION + + item = self._create_assessment_item(item_type, question_text, answers, hints) + + # Associate the image with the assessment item + base_image.assessment_item = item + base_image.save() + + # Create exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 2, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: item_type}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Get the exercise file + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Get all image files in the zip + image_files = [ + name for name in zip_file.namelist() if name.startswith("images/") + ] + + # Verify we have exactly 2 image files (one for each unique size) + # We should have one at 200x150 and one at 100x75 + self.assertEqual( + len(image_files), + 2, + f"Expected 2 resized images, found {len(image_files)}: {image_files}", + ) + + # Load the item JSON to check image references + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + + # Determine where to look for the content based on field type + if field_type == "question": + content = item_json["question"]["content"] + elif field_type == "answer": + answer_widgets = 
item_json["question"]["widgets"] + radio_widget = answer_widgets.get("radio 1") or answer_widgets.get( + "checkbox 1" + ) + content = "".join( + choice["content"] for choice in radio_widget["options"]["choices"] + ) + else: # hint + content = "".join(hint["content"] for hint in item_json["hints"]) + + # Extract image filenames from the content using regex + pattern = r"images/([a-f0-9]+\.(png|jpg|jpeg|gif))" + matches = re.findall(pattern, content) + + # Get unique image filenames + unique_image_files = set(match[0] for match in matches) + + # Check if we have references to both resized versions + self.assertEqual( + len(unique_image_files), + 2, + f"Expected 2 unique image references, found {len(unique_image_files)}", + ) + + # The original image should not be present unless it was referenced without resizing + original_image_name = f"images/{base_image.filename()}" + self.assertNotIn( + original_image_name, + zip_file.namelist(), + "Original image should not be included when only resized versions are used", + ) + + # Verify that the same dimensions use the same resized image + if field_type == "question": + # Extract the first two image references (they should be the same) + first_image_refs = re.findall( + pattern, content.split("Second resized image")[0] + ) + second_image_refs = re.findall( + pattern, + content.split("Second resized image")[1].split("Third resized image")[ + 0 + ], + ) + + self.assertEqual( + first_image_refs[0][0], + second_image_refs[0][0], + "Same-sized images should reference the same file", + ) + + # Check that the images in the zip have different filesizes + image_sizes = [] + for image_file in image_files: + image_sizes.append(len(zip_file.read(image_file))) + + # Images with different dimensions should have different sizes + self.assertNotEqual( + image_sizes[0], + image_sizes[1], + "Different sized images should have different file sizes", + ) + + # Verify that the dimensions have been stripped from the markdown + for file_name in unique_image_files: + # Because we can't predict the set ordering, just confirm that + # neither dimension descriptor is applied. 
+ first_file = f"{file_name} =200x150" + self.assertNotIn(first_file, content) + second_file = f"{file_name} =100x75" + self.assertNotIn(second_file, content) + + def test_image_resizing_in_question(self): + """Test image resizing functionality in question content""" + self._test_image_resizing_in_field("question") + + def test_image_resizing_in_answer(self): + """Test image resizing functionality in answer content""" + self._test_image_resizing_in_field("answer") + + def test_image_resizing_in_hint(self): + """Test image resizing functionality in hint content""" + self._test_image_resizing_in_field("hint") + + def test_image_with_same_resize_dimensions(self): + """Test handling of multiple instances of the same image with the same resize dimensions""" + # Create a base image file + base_image = fileobj_exercise_image(size=(400, 300), color="green") + base_image_url = exercises.CONTENT_STORAGE_FORMAT.format(base_image.filename()) + + # Create a question with multiple references to the same image with same dimensions + question_text = ( + f"First image: ![shape1]({base_image_url} =200x150)\n" + f"Second image: ![shape2]({base_image_url} =200x150)\n" + f"Third image: ![shape3]({base_image_url} =200x150)" + ) + + # Create the assessment item + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [{"answer": "Answer", "correct": True, "order": 1}], + ) + + # Associate the image with the assessment item + base_image.assessment_item = item + base_image.save() + + # Create exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Get the exercise file + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Get all image files in the zip + image_files = [ + name for name in zip_file.namelist() if name.startswith("images/") + ] + + # Verify we have exactly 1 image file (all references are to the same size) + self.assertEqual( + len(image_files), + 1, + f"Expected 1 resized image, found {len(image_files)}: {image_files}", + ) + + # Check that all three references point to the same image file + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + content = item_json["question"]["content"] + + # Extract image filenames from the content + pattern = r"images/([a-f0-9]+\.(png|jpg|jpeg|gif))" + matches = re.findall(pattern, content) + + # All matches should reference the same file + self.assertEqual(len(matches), 3, "Expected 3 image references") + self.assertEqual( + matches[0][0], + matches[1][0], + "First and second image references should match", + ) + self.assertEqual( + matches[1][0], + matches[2][0], + "Second and third image references should match", + ) + + def test_image_with_similar_dimensions(self): + """Test handling of image resizing with similar but not identical dimensions""" + # Create a base image file + base_image = fileobj_exercise_image(size=(400, 300), color="red") + base_image_url = exercises.CONTENT_STORAGE_FORMAT.format(base_image.filename()) + + # Create a question with images that have very similar dimensions + # The code has logic to use the same image if dimensions are within 1% of each other + question_text = ( + f"First image: 
![shape1]({base_image_url} =200x150)\n" + f"Second image (0.5% larger): ![shape2]({base_image_url} =201x151)\n" # Within 1% threshold + f"Third image (1.5% larger): ![shape3]({base_image_url} =203x152)" # Outside 1% threshold + ) + + # Create the assessment item + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [{"answer": "Answer", "correct": True, "order": 1}], + ) + + # Associate the image with the assessment item + base_image.assessment_item = item + base_image.save() + + # Create exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Get the exercise file + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Get all image files in the zip + image_files = [ + name for name in zip_file.namelist() if name.startswith("images/") + ] + + # Verify we have exactly 2 image files (200x150/201x151 should share one file, 203x152 gets its own) + self.assertEqual( + len(image_files), + 2, + f"Expected 2 resized images, found {len(image_files)}: {image_files}", + ) + + # Check the image references in the content + item_json = json.loads( + zip_file.read(f"{item.assessment_id}.json").decode("utf-8") + ) + content = item_json["question"]["content"] + + # Extract image filenames from the content + pattern = r"images/([a-f0-9]+\.(png|jpg|jpeg|gif))" + matches = re.findall(pattern, content) + + # First and second should match (within 1% threshold) + # Third should be different (outside threshold) + first_image = matches[0][0] + second_image = matches[1][0] + third_image = matches[2][0] + + self.assertEqual( + first_image, + second_image, + "Images with dimensions within 1% threshold should use the same file", + ) + self.assertNotEqual( + first_image, + third_image, + "Images with dimensions outside 1% threshold should use different files", + ) + + def test_image_with_zero_width(self): + # Create a base image file + base_image = fileobj_exercise_image(size=(400, 300), color="red") + base_image_url = exercises.CONTENT_STORAGE_FORMAT.format(base_image.filename()) + + # Create a question where one image reference requests a zero width, + # alongside a normally resized reference + question_text = ( + f"First image: ![shape1]({base_image_url} =0x150)\n" + f"Second image: ![shape2]({base_image_url} =200x151)" + ) + + # Create the assessment item + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [{"answer": "Answer", "correct": True, "order": 1}], + ) + + # Associate the image with the assessment item + base_image.assessment_item = item + base_image.save() + + # Create exercise data + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Create the Perseus exercise + self._create_perseus_zip(exercise_data) + + # Get the exercise file + exercise_file = self.exercise_node.files.get(preset_id=format_presets.EXERCISE) + + # Validate the zip file + zip_file, _ = self._validate_perseus_zip(exercise_file) + + # Get all image files in the zip + image_files
= [ + name for name in zip_file.namelist() if name.startswith("images/") + ] + + # Verify we have exactly 1 image file + self.assertEqual( + len(image_files), + 1, + f"Expected 1 resized images, found {len(image_files)}: {image_files}", + ) + + +class TestQTIExerciseCreation(StudioTestCase): + """ + Tests for the QTI exercise generator which handles QTI format exercise file generation. + + These tests verify that the function correctly packages assessment items + into a valid QTI Content Package with IMS manifest and individual item XML files. + """ + + maxDiff = None + + def setUp(self): + self.setUpBase() + + # Create an exercise node + self.exercise_node = ContentNode.objects.create( + title="Test QTI Exercise", + node_id="1234567890abcdef1234567890abcded", + content_id="fedcba0987654321fedcba0987654321", + kind_id=content_kinds.EXERCISE, + parent=self.channel.main_tree, + extra_fields=json.dumps( + { + "randomize": True, + "options": { + "completion_criteria": { + "model": "mastery", + "threshold": { + "mastery_model": exercises.M_OF_N, + "m": 3, + "n": 5, + }, + } + }, + } + ), + ) + + def _create_assessment_item( + self, item_type, question_text, answers, hints=None, assessment_id=None + ): + """Helper to create assessment items with the right structure""" + if hints is None: + hints = [{"hint": "This is a hint", "order": 1}] + + item = AssessmentItem.objects.create( + contentnode=self.exercise_node, + assessment_id=assessment_id or uuid4().hex, + type=item_type, + question=question_text, + answers=json.dumps(answers), + hints=json.dumps(hints), + raw_data="{}", + order=len(self.exercise_node.assessment_items.all()) + 1, + randomize=True, + ) + return item + + def _create_qti_zip(self, exercise_data): + """Create QTI exercise zip using the generator""" + generator = QTIExerciseGenerator( + self.exercise_node, + exercise_data, + self.channel.id, + "en-US", + user_id=self.user.id, + ) + return generator.create_exercise_archive() + + def _normalize_xml(self, xml_string): + return "".join(x.strip() for x in xml_string.split("\n")) + + def _validate_qti_zip_structure(self, exercise_file): + """Helper to validate basic structure of the QTI Content Package""" + # Use Django's storage backend to read the file + with storage.open(exercise_file.file_on_disk.name, "rb") as f: + zip_data = f.read() + + zip_file = zipfile.ZipFile(BytesIO(zip_data)) + + # Check that the imsmanifest.xml file exists + assert ( + "imsmanifest.xml" in zip_file.namelist() + ), "imsmanifest.xml not found in zip file" + + return zip_file + + def test_basic_qti_exercise_creation(self): + """Test the basic creation of a QTI exercise with a single question""" + # Create a simple multiple choice question with 32-char hex ID + assessment_id = "1234567890abcdef1234567890abcdef" + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + "What is 2+2?", + [ + {"answer": "4", "correct": True, "order": 1}, + {"answer": "3", "correct": False, "order": 2}, + {"answer": "5", "correct": False, "order": 3}, + ], + assessment_id=assessment_id, + ) + + # Create the exercise data structure + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 5, + "m": 3, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + # Call the function to create the QTI exercise + self._create_qti_zip(exercise_data) + + # Verify that a file was created for the node + exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP) + 
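        # The item filename asserted below is produced by hex_to_qti_id(). A
        # minimal sketch of the apparent mapping, inferred from the IDs used in
        # these tests rather than from the verified implementation: QTI
        # identifiers must start with a letter, so a "K" prefix is added to the
        # unpadded URL-safe base64 encoding of the 16 assessment-id bytes.
        #
        #     import base64
        #
        #     def sketch_hex_to_qti_id(hex_id):
        #         raw = bytes.fromhex(hex_id)  # 32 hex chars -> 16 bytes
        #         encoded = base64.urlsafe_b64encode(raw).decode("ascii")
        #         return "K" + encoded.rstrip("=")
        #
        #     sketch_hex_to_qti_id("1234567890abcdef1234567890abcdef")
        #     # -> "KEjRWeJCrze8SNFZ4kKvN7w"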
self.assertIsNotNone(exercise_file) + self.assertEqual(exercise_file.file_format_id, "zip") + + # Validate the contents of the zip file + zip_file = self._validate_qti_zip_structure(exercise_file) + + # Check that the assessment item XML file exists + expected_item_file = "items/KEjRWeJCrze8SNFZ4kKvN7w.xml" + self.assertIn(expected_item_file, zip_file.namelist()) + + # Get the actual QTI item XML content + actual_item_xml = zip_file.read(expected_item_file).decode("utf-8") + + # Expected QTI item XML content + expected_item_xml = """ + + + + choice_0 + + + + + + +

    What is 2+2?

    + +

    4

    +

    3

    +

    5

    + + + +""" + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_item_xml), + self._normalize_xml(actual_item_xml), + ) + + # Get the actual IMS manifest content + actual_manifest_xml = zip_file.read("imsmanifest.xml").decode("utf-8") + + # Expected IMS manifest XML content + expected_manifest_xml = """ + + + QTI Package + 3.0.0 + + + + + + + +""" + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_manifest_xml), + self._normalize_xml(actual_manifest_xml), + ) + + def test_multiple_selection_question(self): + """Test QTI generation for multiple selection questions""" + assessment_id = "abcdef1234567890abcdef1234567890" + item = self._create_assessment_item( + exercises.MULTIPLE_SELECTION, + "Select all prime numbers:", + [ + {"answer": "2", "correct": True, "order": 1}, + {"answer": "3", "correct": True, "order": 2}, + {"answer": "4", "correct": False, "order": 3}, + {"answer": "5", "correct": True, "order": 4}, + ], + assessment_id=assessment_id, + ) + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.MULTIPLE_SELECTION}, + } + + self._create_qti_zip(exercise_data) + exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP) + zip_file = self._validate_qti_zip_structure(exercise_file) + + qti_id = hex_to_qti_id(assessment_id) + + # Check the QTI XML for multiple selection specifics + expected_item_file = f"items/{qti_id}.xml" + actual_item_xml = zip_file.read(expected_item_file).decode("utf-8") + + # Expected QTI item XML content for multiple selection + expected_item_xml = """ + + + + choice_0 + choice_1 + choice_3 + + + + + + +

    Select all prime numbers:

    +
    +

    2

    +

    3

    +

    4

    +

    5

    +
    +
    + +
    """ + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_item_xml), + self._normalize_xml(actual_item_xml), + ) + + def test_free_response_question(self): + assessment_id = "fedcba0987654321fedcba0987654321" + item = self._create_assessment_item( + exercises.FREE_RESPONSE, + "What is the capital of France?", + [{"answer": "Paris", "correct": True, "order": 1}], + assessment_id=assessment_id, + ) + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.FREE_RESPONSE}, + } + + self._create_qti_zip(exercise_data) + exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP) + zip_file = self._validate_qti_zip_structure(exercise_file) + + # Check the QTI XML for text entry specifics + expected_item_file = "items/K_ty6CYdlQyH-3LoJh2VDIQ.xml" + actual_item_xml = zip_file.read(expected_item_file).decode("utf-8") + + # Expected QTI item XML content for text entry + expected_item_xml = """ + + + + Paris + + + + +
    +

    What is the capital of France?

    +

    +
    +
    + +
    """ + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_item_xml), + self._normalize_xml(actual_item_xml), + ) + + def test_free_response_question_no_answers(self): + assessment_id = "fedcba0987654321fedcba0987654321" + item = self._create_assessment_item( + exercises.FREE_RESPONSE, + "What is the capital of France?", + [], + assessment_id=assessment_id, + ) + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.FREE_RESPONSE}, + } + + self._create_qti_zip(exercise_data) + exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP) + zip_file = self._validate_qti_zip_structure(exercise_file) + + # Check the QTI XML for text entry specifics + expected_item_file = "items/K_ty6CYdlQyH-3LoJh2VDIQ.xml" + actual_item_xml = zip_file.read(expected_item_file).decode("utf-8") + + # Expected QTI item XML content for text entry + expected_item_xml = """ + + + + +
    +

    What is the capital of France?

    +

    +
    +
    + +
    """ + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_item_xml), + self._normalize_xml(actual_item_xml), + ) + + def test_free_response_question_with_maths(self): + assessment_id = "fedcba0987654321fedcba0987654321" + item = self._create_assessment_item( + exercises.FREE_RESPONSE, + "$$\\sum_n^sxa^n$$\n\n What does this even mean?", + [{"answer": "Nothing", "correct": True, "order": 1}], + assessment_id=assessment_id, + ) + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.FREE_RESPONSE}, + } + + self._create_qti_zip(exercise_data) + exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP) + zip_file = self._validate_qti_zip_structure(exercise_file) + + # Check the QTI XML for text entry specifics + expected_item_file = "items/K_ty6CYdlQyH-3LoJh2VDIQ.xml" + actual_item_xml = zip_file.read(expected_item_file).decode("utf-8") + + # Expected QTI item XML content for text entry + expected_item_xml = """ + + + + Nothing + + + + +
    + + + + ns + x + an + + \\sum_n^sxa^n + + +

    What does this even mean?

    +

    +
    +
    + +
    """ + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_item_xml), + self._normalize_xml(actual_item_xml), + ) + + def test_perseus_question_rejection(self): + """Test that Perseus questions are properly rejected""" + assessment_id = "aaaa1111bbbb2222cccc3333dddd4444" + # Create a mock Perseus question + item = AssessmentItem.objects.create( + contentnode=self.exercise_node, + assessment_id=assessment_id, + type=exercises.PERSEUS_QUESTION, + raw_data='{"question": {"content": "Perseus content"}}', + order=1, + ) + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.PERSEUS_QUESTION}, + } + + # Should raise ValueError for Perseus questions + with self.assertRaises(ValueError) as context: + self._create_qti_zip(exercise_data) + + self.assertIn("Perseus questions are not supported", str(context.exception)) + + def test_exercise_with_image(self): + """Test QTI exercise generation with images""" + assessment_id = "1111aaaa2222bbbb3333cccc4444dddd" + image_file = fileobj_exercise_image() + + # Create a question with image + image_url = exercises.CONTENT_STORAGE_FORMAT.format(f"{image_file.filename()}") + question_text = f"Identify the shape: ![shape]({image_url})" + item = self._create_assessment_item( + exercises.SINGLE_SELECTION, + question_text, + [ + {"answer": "Circle", "correct": True, "order": 1}, + {"answer": "Square", "correct": False, "order": 2}, + ], + assessment_id=assessment_id, + ) + + # Associate the image with the assessment item + image_file.assessment_item = item + image_file.save() + + exercise_data = { + "mastery_model": exercises.M_OF_N, + "randomize": True, + "n": 1, + "m": 1, + "all_assessment_items": [item.assessment_id], + "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION}, + } + + self._create_qti_zip(exercise_data) + exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP) + zip_file = self._validate_qti_zip_structure(exercise_file) + + # Check that the image file was included in the zip + image_path = f"items/images/{image_file.filename()}" + self.assertIn(image_path, zip_file.namelist()) + + # Get the actual manifest content + actual_manifest_xml = zip_file.read("imsmanifest.xml").decode("utf-8") + + # Expected manifest should include the image file dependency + expected_manifest_xml = f""" + + + QTI Package + 3.0.0 + + + + + + + + +""" + + # Compare normalized XML + self.assertEqual( + self._normalize_xml(expected_manifest_xml), + self._normalize_xml(actual_manifest_xml), + ) + + self.assertEqual(exercise_file.checksum, "8df26b0c7009ae84fe148cceda8e0138") + + def test_image_resizing(self): + # Create a base image file + base_image = fileobj_exercise_image(size=(400, 300), color="blue") + base_image_url = exercises.CONTENT_STORAGE_FORMAT.format(base_image.filename()) + + # For questions, test multiple sizes of the same image + question_text = ( + f"First resized image: ![shape1]({base_image_url} =200x150)\n\n" + f"Second resized image (same): ![shape2]({base_image_url} =200x150)\n\n" + f"Third resized image (different): ![shape3]({base_image_url} =100x75)" + ) + answers = [{"answer": "Answer A", "correct": True, "order": 1}] + hints = [{"hint": "Hint text", "order": 1}] + + # Create the assessment item + item_type = exercises.SINGLE_SELECTION + + item = self._create_assessment_item(item_type, question_text, answers, hints) + + # Associate the 
+        base_image.assessment_item = item
+        base_image.save()
+
+        # Create exercise data
+        exercise_data = {
+            "mastery_model": exercises.M_OF_N,
+            "randomize": True,
+            "n": 2,
+            "m": 1,
+            "all_assessment_items": [item.assessment_id],
+            "assessment_mapping": {item.assessment_id: item_type},
+        }
+
+        # Create the QTI exercise
+        self._create_qti_zip(exercise_data)
+
+        exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP)
+        zip_file = self._validate_qti_zip_structure(exercise_file)
+
+        # Get all image files in the zip
+        image_files = [
+            name for name in zip_file.namelist() if name.startswith("items/images/")
+        ]
+
+        # Verify we have exactly 2 image files (one for each unique size)
+        # We should have one at 200x150 and one at 100x75
+        self.assertEqual(
+            len(image_files),
+            2,
+            f"Expected 2 resized images, found {len(image_files)}: {image_files}",
+        )
+
+        # The original image should not be present unless it was referenced without resizing
+        original_image_name = f"images/{base_image.filename()}"
+        self.assertNotIn(
+            original_image_name,
+            zip_file.namelist(),
+            "Original image should not be included when only resized versions are used",
+        )
+
+        qti_id = hex_to_qti_id(item.assessment_id)
+
+        # Check the QTI item XML content
+        expected_item_file = f"items/{qti_id}.xml"
+        actual_item_xml = zip_file.read(expected_item_file).decode("utf-8")
+
+        # Expected QTI item XML content
+        expected_item_xml = f"""
+choice_0
+First resized image: shape1
+Second resized image (same): shape2
+Third resized image (different): shape3
+Answer A
+"""
+
+        # Compare normalized XML
+        self.assertEqual(
+            self._normalize_xml(expected_item_xml),
+            self._normalize_xml(actual_item_xml),
+        )
+
+    def test_question_with_mathematical_content(self):
+        """Test QTI generation for questions containing mathematical formulas converted to MathML"""
+        assessment_id = "dddddddddddddddddddddddddddddddd"
+        item = self._create_assessment_item(
+            exercises.SINGLE_SELECTION,
+            "Solve the equation $$\\frac{x}{2} = 3$$ for x. What is the value of x?",
+            [
+                {"answer": "6", "correct": True, "order": 1},
+                {"answer": "3", "correct": False, "order": 2},
+                {"answer": "1.5", "correct": False, "order": 3},
+                {"answer": "9", "correct": False, "order": 4},
+            ],
+            assessment_id=assessment_id,
+        )
+
+        exercise_data = {
+            "mastery_model": exercises.M_OF_N,
+            "randomize": True,
+            "n": 1,
+            "m": 1,
+            "all_assessment_items": [item.assessment_id],
+            "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION},
+        }
+
+        self._create_qti_zip(exercise_data)
+        exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP)
+        zip_file = self._validate_qti_zip_structure(exercise_file)
+
+        qti_id = hex_to_qti_id(assessment_id)
+
+        # Check the QTI XML for mathematical content conversion to MathML
+        expected_item_file = f"items/{qti_id}.xml"
+        actual_item_xml = zip_file.read(expected_item_file).decode("utf-8")
+
+        # Expected QTI item XML content with MathML conversion
+        expected_item_xml = f"""
+choice_0
+Solve the equation x2=3\\frac{{x}}{{2}} = 3 for x. What is the value of x?
+6
+3
+1.5
+9
+"""
+
+        # Compare normalized XML
+        self.assertEqual(
+            self._normalize_xml(expected_item_xml),
+            self._normalize_xml(actual_item_xml),
+        )
+
+    def test_multiple_question_types_mixed(self):
+        """Test creating a QTI exercise with multiple supported question types"""
+        # Create different types of supported questions with 32-char hex IDs
+        assessment_id1 = "1111111111111111111111111111111a"
+        assessment_id2 = "2222222222222222222222222222222b"
+        assessment_id3 = "3333333333333333333333333333333c"
+
+        qti_id1 = hex_to_qti_id(assessment_id1)
+        qti_id2 = hex_to_qti_id(assessment_id2)
+        qti_id3 = hex_to_qti_id(assessment_id3)
+
+        item1 = self._create_assessment_item(
+            exercises.SINGLE_SELECTION,
+            "What is 2+2?",
+            [
+                {"answer": "4", "correct": True, "order": 1},
+                {"answer": "5", "correct": False, "order": 2},
+            ],
+            assessment_id=assessment_id1,
+        )
+
+        item2 = self._create_assessment_item(
+            exercises.MULTIPLE_SELECTION,
+            "Select all even numbers:",
+            [
+                {"answer": "2", "correct": True, "order": 1},
+                {"answer": "3", "correct": False, "order": 2},
+                {"answer": "4", "correct": True, "order": 3},
+                {"answer": "5", "correct": False, "order": 4},
+            ],
+            assessment_id=assessment_id2,
+        )
+
+        item3 = self._create_assessment_item(
+            exercises.INPUT_QUESTION,
+            "What is the capital of Spain?",
+            [{"answer": "Madrid", "correct": True, "order": 1}],
+            assessment_id=assessment_id3,
+        )
+
+        exercise_data = {
+            "mastery_model": exercises.M_OF_N,
+            "randomize": True,
+            "n": 3,
+            "m": 2,
+            "all_assessment_items": [
+                item1.assessment_id,
+                item2.assessment_id,
+                item3.assessment_id,
+            ],
+            "assessment_mapping": {
+                item1.assessment_id: exercises.SINGLE_SELECTION,
+                item2.assessment_id: exercises.MULTIPLE_SELECTION,
+                item3.assessment_id: exercises.INPUT_QUESTION,
+            },
+        }
+
+        self._create_qti_zip(exercise_data)
+        exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP)
+        zip_file = self._validate_qti_zip_structure(exercise_file)
+
+        # Check that all question XML files are included
+        expected_files = [
+            f"items/{qti_id1}.xml",
+            f"items/{qti_id2}.xml",
+            f"items/{qti_id3}.xml",
+        ]
+
+        for expected_file in expected_files:
+            self.assertIn(expected_file, zip_file.namelist())
+
+        # Get the actual manifest content
+        actual_manifest_xml = zip_file.read("imsmanifest.xml").decode("utf-8")
+
+        # Expected manifest with all three resources
+        expected_manifest_xml = f"""
+QTI Package
+3.0.0
+"""
+
+        # Compare normalized XML
+        self.assertEqual(
+            self._normalize_xml(expected_manifest_xml),
+            self._normalize_xml(actual_manifest_xml),
+        )
+
+        self.assertEqual(exercise_file.checksum, "8e488543ef52f0b153553eaf9fb51419")
+
+    def test_unsupported_question_type(self):
+        """Test that unsupported question types raise appropriate errors"""
+        assessment_id = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
+        # Create an item with an unsupported type
+        item = AssessmentItem.objects.create(
+            contentnode=self.exercise_node,
+            assessment_id=assessment_id,
+            type="UNSUPPORTED_TYPE",
+            question="This is an unsupported question type",
+            answers="[]",
+            hints="[]",
+            raw_data="{}",
+            order=1,
+        )
+
+        exercise_data = {
+            "mastery_model": exercises.M_OF_N,
+            "randomize": True,
+            "n": 1,
+            "m": 1,
+            "all_assessment_items": [item.assessment_id],
+            "assessment_mapping": {item.assessment_id: "UNSUPPORTED_TYPE"},
+        }
+
+        with self.assertRaises(ValueError) as context:
+            self._create_qti_zip(exercise_data)
+
+        self.assertIn("Unsupported question type", str(context.exception))
+
+    def test_manifest_structure_single_item(self):
+        """Test that the IMS manifest has proper structure and metadata for a single item"""
+        assessment_id = "cccccccccccccccccccccccccccccccc"
+        item = self._create_assessment_item(
+            exercises.SINGLE_SELECTION,
+            "Test question",
+            [{"answer": "Test answer", "correct": True, "order": 1}],
+            assessment_id=assessment_id,
+        )
+
+        exercise_data = {
+            "mastery_model": exercises.M_OF_N,
+            "randomize": True,
+            "n": 1,
+            "m": 1,
+            "all_assessment_items": [item.assessment_id],
+            "assessment_mapping": {item.assessment_id: exercises.SINGLE_SELECTION},
+        }
+
+        self._create_qti_zip(exercise_data)
+        exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP)
+        zip_file = self._validate_qti_zip_structure(exercise_file)
+
+        # Get the actual manifest content
+        actual_manifest_xml = zip_file.read("imsmanifest.xml").decode("utf-8")
+
+        # Expected exact manifest structure
+        expected_manifest_xml = """
+QTI Package
+3.0.0
+"""
+
+        # Compare normalized XML
+        self.assertEqual(
+            self._normalize_xml(expected_manifest_xml),
+            self._normalize_xml(actual_manifest_xml),
+        )
+
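The `hex_to_qti_id` values hard-coded in these fixtures (for example `K_ty6CYdlQyH-3LoJh2VDIQ` for `fedcba0987654321fedcba0987654321` in `test_input_question` below) are consistent with re-encoding the 16 raw bytes of the hex assessment ID as URL-safe base64, prefixed presumably so the identifier starts with a letter as QTI requires. A minimal sketch inferred from the fixture data, not necessarily the real implementation:

    import base64

    def hex_to_qti_id(hex_id):
        # 32 hex chars -> 16 bytes -> 22 urlsafe-base64 chars once the
        # "==" padding is dropped; "K" keeps the identifier letter-initial.
        raw = bytes.fromhex(hex_id)
        return "K" + base64.urlsafe_b64encode(raw).decode("ascii").rstrip("=")

    assert hex_to_qti_id("fedcba0987654321fedcba0987654321") == "K_ty6CYdlQyH-3LoJh2VDIQ"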
+    def test_input_question(self):
+        assessment_id = "fedcba0987654321fedcba0987654321"
+        item = self._create_assessment_item(
+            exercises.INPUT_QUESTION,
+            "What positive integers are less than 3?",
+            [
+                {"answer": 1, "correct": True, "order": 1},
+                {"answer": 2, "correct": True, "order": 2},
+            ],
+            assessment_id=assessment_id,
+        )
+
+        exercise_data = {
+            "mastery_model": exercises.M_OF_N,
+            "randomize": True,
+            "n": 1,
+            "m": 1,
+            "all_assessment_items": [item.assessment_id],
+            "assessment_mapping": {item.assessment_id: exercises.INPUT_QUESTION},
+        }
+
+        self._create_qti_zip(exercise_data)
+        exercise_file = self.exercise_node.files.get(preset_id=format_presets.QTI_ZIP)
+        zip_file = self._validate_qti_zip_structure(exercise_file)
+
+        # Check the QTI XML for text entry specifics
+        expected_item_file = "items/K_ty6CYdlQyH-3LoJh2VDIQ.xml"
+        actual_item_xml = zip_file.read(expected_item_file).decode("utf-8")
+
+        # Expected QTI item XML content for text entry
+        expected_item_xml = """
+1
+2
+What positive integers are less than 3?
+"""
+
+        # Compare normalized XML
+        self.assertEqual(
+            self._normalize_xml(expected_item_xml),
+            self._normalize_xml(actual_item_xml),
+        )
diff --git a/contentcuration/contentcuration/tests/utils/test_garbage_collect.py b/contentcuration/contentcuration/tests/utils/test_garbage_collect.py
index f67daf8c28..b12c25cd31 100644
--- a/contentcuration/contentcuration/tests/utils/test_garbage_collect.py
+++ b/contentcuration/contentcuration/tests/utils/test_garbage_collect.py
@@ -170,7 +170,11 @@ def _create_deleted_user_in_past(deletion_datetime, email="test@test.com"):
     user = create_user(email, "password", "test", "test")
     user.delete()
 
-    user_latest_delete_history = UserHistory.objects.filter(user_id=user.id, action=user_history.DELETION).order_by("-performed_at").first()
+    user_latest_delete_history = (
+        UserHistory.objects.filter(user_id=user.id, action=user_history.DELETION)
+        .order_by("-performed_at")
+        .first()
+    )
     user_latest_delete_history.performed_at = deletion_datetime
     user_latest_delete_history.save()
     return user
@@ -180,28 +184,46 @@ class CleanUpSoftDeletedExpiredUsersTestCase(StudioTestCase):
     def test_cleanup__all_expired_soft_deleted_users(self):
         expired_users = []
         for i in range(0, 5):
-            expired_users.append(_create_deleted_user_in_past(deletion_datetime=THREE_MONTHS_AGO, email=f"test-{i}@test.com"))
+            expired_users.append(
+                _create_deleted_user_in_past(
+                    deletion_datetime=THREE_MONTHS_AGO, email=f"test-{i}@test.com"
+                )
+            )
 
         clean_up_soft_deleted_users()
 
         for user in expired_users:
-            assert UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).exists() is True
+            assert (
+                UserHistory.objects.filter(
+                    user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION
+                ).exists()
+                is True
+            )
 
     def test_no_cleanup__unexpired_soft_deleted_users(self):
         two_months_ago = datetime.now() - timedelta(days=63)
         user = _create_deleted_user_in_past(deletion_datetime=two_months_ago)
         clean_up_soft_deleted_users()
-        assert UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).exists() is False
+        assert (
+            UserHistory.objects.filter(
+                user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION
+            ).exists()
+            is False
+        )
 
     def test_no_cleanup__undeleted_users(self):
         user = create_user("test@test.com", "password", "test", "test")
         clean_up_soft_deleted_users()
         assert user.deleted is False
-        assert UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).exists() is False
+        assert (
+            UserHistory.objects.filter(
+                user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION
+            ).exists()
+            is False
+        )
 
 
 class CleanUpContentNodesTestCase(StudioTestCase):
-
     def test_delete_all_contentnodes_in_orphanage_tree(self):
         """
         Make sure that by default, all nodes created with a timestamp of 3 months
@@ -214,11 +236,17 @@ def test_delete_all_contentnodes_in_orphanage_tree(self):
         _create_expired_contentnode()
 
         # sanity check to see if we have X contentnodes under the garbage tree
-        assert ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count() == num_contentnodes
+        assert (
+            ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count()
+            == num_contentnodes
+        )
 
         # now clean up our contentnodes, and check that our descendant count is indeed 0 now
         clean_up_contentnodes()
-        assert ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count() == 0
+        assert (
+            ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count()
+            == 0
+        )
 
     def test_deletes_associated_files(self):
@@ -366,15 +394,12 @@ def test_doesnt_delete_file_referenced_by_orphan_and_nonorphan_nodes(self):
 
 
 class CleanUpFeatureFlagsTestCase(StudioTestCase):
-
     def setUp(self):
         return super(CleanUpFeatureFlagsTestCase, self).setUpBase()
 
     def test_clean_up(self):
         key = "feature_flag_does_not_exist"
-        self.user.feature_flags = {
-            key: True
-        }
+        self.user.feature_flags = {key: True}
         self.user.save()
         clean_up_feature_flags()
         self.user.refresh_from_db()
@@ -382,15 +407,22 @@ def test_clean_up(self):
 
 
 class CleanupTaskTestCase(StudioTestCase):
-
     def setUp(self):
-        self.pruned_task = TaskResult.objects.create(task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="pruned_task")
-        self.failed_task = TaskResult.objects.create(task_id=uuid.uuid4().hex, status=states.FAILURE, task_name="failed_task")
-        self.recent_task = TaskResult.objects.create(task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="recent_task")
+        self.pruned_task = TaskResult.objects.create(
+            task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="pruned_task"
+        )
+        self.failed_task = TaskResult.objects.create(
+            task_id=uuid.uuid4().hex, status=states.FAILURE, task_name="failed_task"
+        )
+        self.recent_task = TaskResult.objects.create(
+            task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="recent_task"
+        )
 
         # `date_done` uses `auto_now`, so manually set it
         done = datetime.now() - timedelta(days=8)
-        TaskResult.objects.filter(pk__in=[self.pruned_task.pk, self.failed_task.pk]).update(date_done=done)
+        TaskResult.objects.filter(
+            pk__in=[self.pruned_task.pk, self.failed_task.pk]
+        ).update(date_done=done)
 
         # run
         clean_up_tasks()
@@ -413,7 +445,7 @@ def test_recent_task(self):
 
 
 def _create_stale_file(user, modified_date):
-    checksum = '%32x' % random.getrandbits(16 * 8)
+    checksum = "%32x" % random.getrandbits(16 * 8)
     file = File(
         file_size=5,
         checksum=checksum,
@@ -433,7 +465,6 @@ def _create_stale_file(user, modified_date):
 
 
 class CleanupStaleFilesTestCase(StudioTestCase):
-
     def setUp(self):
         user = self.admin_user
diff --git a/contentcuration/contentcuration/tests/utils/test_markdown.py b/contentcuration/contentcuration/tests/utils/test_markdown.py
new file mode 100644
index 0000000000..c111146a85
--- /dev/null
+++ b/contentcuration/contentcuration/tests/utils/test_markdown.py
@@ -0,0 +1,213 @@
+import unittest
+
+from contentcuration.utils.assessment.markdown import render_markdown
+from contentcuration.utils.assessment.qti import ElementTreeBase
+
+
+class TexMathTestMixin:
+    """Mixin providing test methods for TexMath plugin tests"""
+
+    def _assert_conversion(self, markdown_text: str, expected: str):
+        """Override in subclasses to define assertion behavior"""
+        raise NotImplementedError("Subclasses must implement _assert_conversion")
+
+    def test_markdown_with_inline_math(self):
+        """Test conversion of markdown with inline math to HTML + MathML"""
+
+        markdown_text = (
+            "What is the answer to this *question*? $$x\cdot y=z^2$$"  # noqa W605
+        )
+        expected = (
+            "What is the answer to this question? "
+            "x·y=z2"
+            'x\cdot y=z^2'  # noqa W605
+            "\n"
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_block_math(self):
+        """Test conversion of block math"""
+
+        markdown_text = (
+            "Here's an equation:\n\n$$E = mc^2$$\n\nThat's Einstein's formula."
+        )
+        expected = (
+            "Here's an equation:\n"
+            "E=mc2"
+            'E = mc^2'
+            "That's Einstein's formula.\n"
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_multiline_block_math(self):
+        """
+        Ensure a $$ … $$ block spanning multiple lines is converted to MathML
+        and the literal $$ delimiters are removed. This currently fails with
+        the buggy BLOCK_PATTERN because it stops after the first '$'.
+        """
+        markdown_text = (
+            "$$\n"
+            "\\begin{aligned}\n"
+            "a = b + c \\\\\n"
+            "$5 = d + e\n"
+            "\\end{aligned}\n"
+            "$$"
+        )
+        expected = (
+            "a=b+c"
+            '$5=d+e'
+            '\n\\begin{aligned}\na = b + c \\\\\n$5 = d + e\n\\end{aligned}\n'
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_inline_math_with_dollar_inside(self):
+        """
+        Ensure a $$ … $$ inline that contains an internal '$' (e.g. inside
+        \\text{}) is parsed correctly. With the old BLOCK_PATTERN the first '$'
+        prematurely terminates the match so the delimiters remain.
+        """
+        markdown_text = "Test this $$\\text{Cost = 1.00 $USD$}$$"
+        expected = (
+            "Test this "
+            "Cost = 1.00 $USD$"
+            '\\text{Cost = 1.00 $USD$}'
+            "\n"
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_multiple_math_expressions(self):
+        """Test multiple math expressions in one document"""
+
+        markdown_text = "First: $$a + b$$, then $$c \\times d$$, finally $$e^f$$."
+        expected = (
+            "First: "
+            'a+b'
+            'a + b'
+            ", then "
+            'c×d'
+            'c \\times d'
+            ", finally "
+            'ef'
+            'e^f'
+            ".\n"
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_mixed_inline_and_block(self):
+        """Test document with both inline and block math"""
+
+        markdown_text = (
+            "This is inline math: $$a = b$$\n\n"
+            "And this is block math:\n\n"
+            "$$\\sum_{i=1}^{n} x_i = y$$\n\n"
+            "Back to text with more inline: $$z^2$$"
+        )
+        expected = (
+            "This is inline math: "
+            'a=b'
+            'a = b'
+            "\n"
+            "And this is block math:\n"
+            "i=1"
+            "nxi=y"
+            '\sum_{i=1}^{n} x_i = y'  # noqa W605
+            "Back to text with more inline: "
+            'z2'
+            'z^2'
+            "\n"
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_no_math_content(self):
+        """Test that regular markdown without math still works"""
+
+        markdown_text = "This is just *regular* markdown with **bold** text."
+        expected = "This is just regular markdown with bold text.\n"
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_simple_inline_math(self):
+        """Test simple inline math expression"""
+
+        markdown_text = "The variable $$x$$ is unknown."
+        expected = (
+            "The variable "
+            'x'
+            'x'
+            " is unknown.\n"
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_simple_block_math(self):
+        """Test simple block math expression"""
+
+        markdown_text = "$$y = mx + b$$"
+        expected = (
+            "y=mx+b"
+            'y = mx + b'
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+    def test_mo_accented(self):
+        """Regression test for missed experimental property on mo tags"""
+
+        markdown_text = "$$a_b+\\overrightarrow{abc}+\\overleftarrow{abc}\\div\\surd\\overline{abc}$$"
+        expected = (
+            "ab+abc+"
+            'abc÷ab'
+            'c'
+            'a_b+\\overrightarrow{abc}+\\overleftarrow{abc}\\div\\surd\\overline{abc}'
+        )
+
+        self._assert_conversion(markdown_text, expected)
+
+
+class TestTexMathPlugin(TexMathTestMixin, unittest.TestCase):
+    """Test direct markdown conversion: markdown → HTML+MathML"""
+
+    def _assert_conversion(self, markdown_text: str, expected: str):
+        """Test direct markdown to HTML+MathML conversion"""
+        result = render_markdown(markdown_text)
+        self.assertEqual(result, expected)
+
+
+class TestTexMathPluginRoundtrip(TexMathTestMixin, unittest.TestCase):
+    """Test full roundtrip: markdown → HTML+MathML → Pydantic → string"""
+
+    maxDiff = None
+
+    def _assert_conversion(self, markdown_text: str, expected: str):
+        """Test full roundtrip conversion via Pydantic objects"""
+        result = render_markdown(markdown_text)
+
+        # Parse to Pydantic objects and back to string
+        parsed = ElementTreeBase.from_string(result)
+        roundtrip_result = (
+            "".join(e.to_xml_string().strip() for e in parsed)
+            if isinstance(parsed, list)
+            else parsed.to_xml_string().strip()
+        )
+        self.assertEqual(
+            roundtrip_result.replace("\n", "").strip(),
+            expected.replace("\n", "").strip(),
+        )
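The multiline and internal-dollar cases above spell out the bug they guard against: a `BLOCK_PATTERN` that stops at the first `$` inside the math body. A non-greedy DOTALL pattern of roughly this shape passes both cases (a sketch only; the plugin's actual regex may differ):

    import re

    # '.' crosses newlines under DOTALL, and the non-greedy body means a lone
    # '$' (e.g. inside \text{... $USD$ ...}) no longer terminates the match.
    BLOCK_PATTERN = re.compile(r"\$\$(.+?)\$\$", re.DOTALL)

    print(BLOCK_PATTERN.search("Test this $$\\text{Cost = 1.00 $USD$}$$").group(1))
    # \text{Cost = 1.00 $USD$}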
diff --git a/contentcuration/contentcuration/tests/utils/test_nodes.py b/contentcuration/contentcuration/tests/utils/test_nodes.py
index be43d295dd..06c59eacbf 100644
--- a/contentcuration/contentcuration/tests/utils/test_nodes.py
+++ b/contentcuration/contentcuration/tests/utils/test_nodes.py
@@ -1,4 +1,5 @@
 import datetime
+import uuid
 from time import sleep
 
 import mock
@@ -7,10 +8,15 @@
 from django.db.models import F
 from django.db.models import Max
 from django.test import SimpleTestCase
+from le_utils.constants import content_kinds
+from le_utils.constants import format_presets
 
 from ..base import StudioTestCase
+from contentcuration.models import File
+from contentcuration.tests import testdata
 from contentcuration.tests.helpers import mock_class_instance
 from contentcuration.utils.nodes import calculate_resource_size
+from contentcuration.utils.nodes import generate_diff
 from contentcuration.utils.nodes import ResourceSizeHelper
 from contentcuration.utils.nodes import SlowCalculationError
 from contentcuration.utils.nodes import STALE_MAX_CALCULATION_SIZE
@@ -27,13 +33,15 @@ def test_get_size(self):
 
     def test_get_size__root_node_simplification(self):
         self.assertEqual(10, self.helper.get_size())
-        with mock.patch.object(self.root, 'is_root_node') as is_root_node:
+        with mock.patch.object(self.root, "is_root_node") as is_root_node:
             is_root_node.return_value = False
             self.assertEqual(10, self.helper.get_size())
 
     @pytest.mark.skip
     def test_modified_since(self):
-        max_modified = self.helper.queryset.aggregate(max_modified=Max(F('modified')))['max_modified']
+        max_modified = self.helper.queryset.aggregate(max_modified=Max(F("modified")))[
+            "max_modified"
+        ]
         before_max = max_modified - datetime.timedelta(seconds=1)
         after_max = max_modified + datetime.timedelta(seconds=1)
         self.assertTrue(self.helper.modified_since(before_max.isoformat()))
@@ -49,7 +57,7 @@ def setUp(self):
 
     def assertCalculation(self, cache, helper, force=False):
         helper().get_size.return_value = 456
-        now_val = isoparse('2021-01-01T00:00:00')
+        now_val = isoparse("2021-01-01T00:00:00")
         with mock.patch("contentcuration.utils.nodes.timezone.now") as now:
             now.return_value = now_val
             size, stale = calculate_resource_size(self.node, force=force)
@@ -60,7 +68,7 @@ def assertCalculation(self, cache, helper, force=False):
 
     def test_cached(self, cache, helper):
         cache().get_size.return_value = 123
-        cache().get_modified.return_value = '2021-01-01 00:00:00'
+        cache().get_modified.return_value = "2021-01-01 00:00:00"
         helper().modified_since.return_value = False
         size, stale = calculate_resource_size(self.node)
         self.assertEqual(123, size)
@@ -69,7 +77,7 @@ def test_cached(self, cache, helper):
     def test_stale__too_big__no_force(self, cache, helper):
         self.node.get_descendant_count.return_value = STALE_MAX_CALCULATION_SIZE + 1
         cache().get_size.return_value = 123
-        cache().get_modified.return_value = '2021-01-01 00:00:00'
+        cache().get_modified.return_value = "2021-01-01 00:00:00"
         helper().modified_since.return_value = True
         size, stale = calculate_resource_size(self.node)
         self.assertEqual(123, size)
@@ -109,10 +117,15 @@ def db_get_size():
 
         helper().get_size.side_effect = db_get_size
 
-        with mock.patch("contentcuration.utils.nodes.report_exception") as report_exception, \
-                mock.patch("contentcuration.utils.nodes.SLOW_UNFORCED_CALC_THRESHOLD", 1):
+        with mock.patch(
+            "contentcuration.utils.nodes.report_exception"
+        ) as report_exception, mock.patch(
+            "contentcuration.utils.nodes.SLOW_UNFORCED_CALC_THRESHOLD", 1
+        ):
             self.assertCalculation(cache, helper)
-        self.assertIsInstance(report_exception.mock_calls[0][1][0], SlowCalculationError)
+        self.assertIsInstance(
+            report_exception.mock_calls[0][1][0], SlowCalculationError
+        )
 
 
 class CalculateResourceSizeIntegrationTestCase(StudioTestCase):
@@ -133,3 +146,269 @@ def test_small(self):
         size, stale = calculate_resource_size(self.root)
         self.assertEqual(10, size)
         self.assertFalse(stale)
+
+
+class GenerateTreesDiffTestCase(StudioTestCase):
+    def setUp(self):
+        super(GenerateTreesDiffTestCase, self).setUpBase()
+        self.channel.staging_tree = self.channel.main_tree.copy()
+        self.channel.save()
+
+        self.main_tree = self.channel.main_tree
+        self.staging_tree = self.channel.staging_tree
+
+    def _get_stat(self, diff, stat_name):
+        """
+        Helper function to get a specific stat from the diff.
+        """
+        for stat in diff.get("stats", []):
+            if stat.get("field") == stat_name:
+                return stat
+        raise ValueError(f"Stat '{stat_name}' not found in diff.")
+
+    def _create_dummy_files(
+        self,
+        contentnode=None,
+        assessment_item=None,
+        file_size=1000,
+        num_files=1,
+        preset=None,
+    ):
+        """
+        Helper function to create a file associated with a content node or assessment item.
+        """
+        for _ in range(num_files):
+            file = File.objects.create(
+                file_size=file_size,
+                preset_id=preset,
+                contentnode=contentnode,
+                assessment_item=assessment_item,
+            )
+            file.save()
+
+    def _create_dummy_resources(self, count=1, kind=content_kinds.VIDEO, parent=None):
+        """
+        Helper function to create dummy resources under a given parent node.
+        """
+        num_children = parent.get_children().count() if parent else 0
+        for i in range(count):
+            testdata.node(
+                {
+                    "kind_id": kind,
+                    "title": f"Test {kind.capitalize()} {i}",
+                    "sort_order": num_children + i + 1,
+                },
+                parent=parent,
+            )
+
+    def _create_dummy_exercise(self, count=1, parent=None, num_assessments=1):
+        """
+        Helper function to create dummy exercises with a specified number of assessment items.
+        """
+        num_children = parent.get_children().count() if parent else 0
+        for i in range(count):
+            testdata.node(
+                {
+                    "kind_id": content_kinds.EXERCISE,
+                    "mastery_model": "do_all",
+                    "title": f"Test Exercise {i}",
+                    "sort_order": num_children + i + 1,
+                    "assessment_items": [
+                        {
+                            "type": "single_selection",
+                            "question": f"Question {j + 1}?",
+                            "assessment_id": uuid.uuid4(),
+                            "answers": [
+                                {
+                                    "answer": f"Answer {k + 1}",
+                                    "correct": k == 0,  # First answer is correct
+                                    "help_text": "",
+                                }
+                                for k in range(2)
+                            ],
+                        }
+                        for j in range(num_assessments)
+                    ],
+                },
+                parent=parent,
+            )
+
+    def test_generate_diff_for_same_tree(self):
+        diff = generate_diff(self.main_tree.id, self.main_tree.id)
+        stats = diff.get("stats", [])
+        for stat in stats:
+            self.assertTrue(stat["original"] == stat["changed"])
+
+    def test_generate_diff_for_equal_trees(self):
+        diff = generate_diff(self.main_tree.id, self.staging_tree.id)
+        stats = diff.get("stats", [])
+        for stat in stats:
+            if stat["field"] == "date_created":
+                # date_created is not expected to be the same
+                continue
+
+            self.assertTrue(stat["original"] == stat["changed"])
+
+    def test_generate_diff_for_resources_files_sizes(self):
+        count_new_files = 3
+        count_size_per_file = 1000
+
+        staging_video_resource = (
+            self.staging_tree.get_descendants().filter(kind=content_kinds.VIDEO).first()
+        )
+        self._create_dummy_files(
+            contentnode=staging_video_resource,
+            file_size=count_size_per_file,
+            num_files=count_new_files,
+        )
+
+        # How many new files were added times the size of each file
+        expected_difference = count_new_files * count_size_per_file
+
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        file_size_in_bytes_stat = self._get_stat(diff, "file_size_in_bytes")
+
+        self.assertEqual(file_size_in_bytes_stat.get("difference"), expected_difference)
+
+    def test_generate_diff_for_assessments_files_sizes(self):
+        count_new_files = 3
+        count_size_per_file = 1000
+
+        staging_exercise_resource = (
+            self.staging_tree.get_descendants()
+            .filter(kind=content_kinds.EXERCISE)
+            .first()
+        )
+        staging_assessment_item = staging_exercise_resource.assessment_items.first()
+
+        self._create_dummy_files(
+            assessment_item=staging_assessment_item,
+            file_size=count_size_per_file,
+            num_files=count_new_files,
+        )
+
+        # How many new files were added times the size of each file
+        expected_difference = count_new_files * count_size_per_file
+
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        file_size_in_bytes_stat = self._get_stat(diff, "file_size_in_bytes")
+
+        self.assertEqual(file_size_in_bytes_stat.get("difference"), expected_difference)
+
+    def test_generate_diff_for_all_files_sizes(self):
+        count_new_files = 3
+        count_size_per_file = 1000
+
+        staging_exercise_resource = (
+            self.staging_tree.get_descendants()
+            .filter(kind=content_kinds.EXERCISE)
+            .first()
+        )
+        staging_assessment_item = staging_exercise_resource.assessment_items.first()
+
+        self._create_dummy_files(
+            contentnode=staging_exercise_resource,
+            file_size=count_size_per_file,
+            num_files=count_new_files,
+        )
+
+        self._create_dummy_files(
+            assessment_item=staging_assessment_item,
+            file_size=count_size_per_file,
+            num_files=count_new_files,
+        )
+
+        resource_files_size = count_new_files * count_size_per_file
+        assessment_files_size = count_new_files * count_size_per_file
+
+        expected_difference = resource_files_size + assessment_files_size
+
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        file_size_in_bytes_stat = self._get_stat(diff, "file_size_in_bytes")
+
+        self.assertEqual(file_size_in_bytes_stat.get("difference"), expected_difference)
+
+    def test_generate_diff_for_num_resources(self):
+        # Creating files just to test that it doesn't affect the num_resources stat
+        count_new_files = 4
+        staging_exercise_resource = (
+            self.staging_tree.get_descendants()
+            .filter(kind=content_kinds.EXERCISE)
+            .first()
+        )
+        staging_assessment_item = staging_exercise_resource.assessment_items.first()
+        self._create_dummy_files(
+            contentnode=staging_exercise_resource,
+            file_size=1000,
+            num_files=count_new_files,
+        )
+        self._create_dummy_files(
+            assessment_item=staging_assessment_item,
+            file_size=1000,
+            num_files=count_new_files,
+        )
+
+        count_new_resources = 5
+        self._create_dummy_resources(
+            count=count_new_resources,
+            kind=content_kinds.VIDEO,
+            parent=self.staging_tree,
+        )
+
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        count_resources_stat = self._get_stat(diff, "count_resources")
+
+        self.assertEqual(count_resources_stat.get("difference"), count_new_resources)
+
+    def test_generate_diff_for_num_assessment_items(self):
+        count_new_exercises = 3
+        count_assessment_items_per_exercise = 2
+
+        self._create_dummy_exercise(
+            count=count_new_exercises,
+            parent=self.staging_tree,
+            num_assessments=count_assessment_items_per_exercise,
+        )
+
+        expected_difference = count_new_exercises * count_assessment_items_per_exercise
+
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        count_questions_stat = self._get_stat(diff, "count_questions")
+        self.assertEqual(count_questions_stat.get("difference"), expected_difference)
+
+    def test_generate_diff_for_num_subtitle_files(self):
+        count_new_subtitle_files = 3
+        staging_video_resource = (
+            self.staging_tree.get_descendants().filter(kind=content_kinds.VIDEO).first()
+        )
+
+        for i in range(count_new_subtitle_files):
+            self._create_dummy_files(
+                contentnode=staging_video_resource,
+                file_size=1000,
+                num_files=1,
+                preset=format_presets.VIDEO_SUBTITLE,
+            )
+
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        count_subtitles_stat = self._get_stat(diff, "count_subtitles")
+
+        self.assertEqual(
+            count_subtitles_stat.get("difference"), count_new_subtitle_files
+        )
+
+    def test_generate_diff_for_resources_types(self):
+        new_resources = {
+            content_kinds.VIDEO: 3,
+            content_kinds.TOPIC: 2,
+            content_kinds.EXERCISE: 1,
+        }
+        for kind, count in new_resources.items():
+            self._create_dummy_resources(
+                count=count, kind=kind, parent=self.staging_tree
+            )
+        diff = generate_diff(self.staging_tree.id, self.main_tree.id)
+        for kind, name in content_kinds.choices:
+            stat = self._get_stat(diff, f"count_{kind}s")
+            expected_count = new_resources.get(kind, 0)
+            self.assertEqual(stat.get("difference"), expected_count)
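These tests pin down the shape of what `generate_diff` returns: a dict whose `stats` list holds one entry per field, each carrying `original`, `changed`, and `difference` values. Illustratively (field name taken from the assertions above; the numbers are invented):

    diff = generate_diff(staging_tree.id, main_tree.id)
    # e.g. {"stats": [{"field": "file_size_in_bytes",
    #                  "original": 10000, "changed": 13000, "difference": 3000}, ...]}
    file_stat = next(s for s in diff["stats"] if s["field"] == "file_size_in_bytes")
    print(file_stat["difference"])  # 3000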
diff --git a/contentcuration/contentcuration/tests/utils/test_recommendations.py b/contentcuration/contentcuration/tests/utils/test_recommendations.py
index d410651de1..c64e6ef489 100644
--- a/contentcuration/contentcuration/tests/utils/test_recommendations.py
+++ b/contentcuration/contentcuration/tests/utils/test_recommendations.py
@@ -1,10 +1,585 @@
+import copy
+import uuid
+
+from automation.models import RecommendationsCache
+from automation.utils.appnexus import errors
+from automation.utils.appnexus.base import BackendResponse
 from django.test import TestCase
+from kolibri_public.models import ContentNode as PublicContentNode
+from mock import MagicMock
+from mock import patch
 
+from contentcuration.models import Channel
+from contentcuration.models import ContentNode
+from contentcuration.tests import testdata
+from contentcuration.tests.base import StudioTestCase
+from contentcuration.utils.recommendations import EmbeddingsResponse
+from contentcuration.utils.recommendations import EmbedTopicsRequest
 from contentcuration.utils.recommendations import Recommendations
+from contentcuration.utils.recommendations import RecommendationsAdapter
+from contentcuration.utils.recommendations import RecommendationsBackendFactory
+from contentcuration.utils.recommendations import RecommendationsResponse
 
 
 class RecommendationsTestCase(TestCase):
     def test_backend_initialization(self):
-        recomendations = Recommendations()
-        self.assertIsNotNone(recomendations)
-        self.assertIsInstance(recomendations.get_instance(), Recommendations)
+        recommendations = Recommendations()
+        self.assertIsNotNone(recommendations)
+        self.assertIsInstance(recommendations, Recommendations)
+
+
+class RecommendationsAdapterTestCase(StudioTestCase):
+    @classmethod
+    def setUpClass(cls):
+        super(RecommendationsAdapterTestCase, cls).setUpClass()
+
+        cls.channel_1 = Channel.objects.create(
+            id="1234567890abcdef1234567890abcdef",
+            name="Channel 1",
+            actor_id=cls.admin_user.id,
+        )
+        cls.channel_2 = Channel.objects.create(
+            id="abcdef1234567890abcdef1234567890",
+            name="Channel 2",
+            actor_id=cls.admin_user.id,
+        )
+
+    @classmethod
+    def setUpTestData(cls):
+        cls.adapter = RecommendationsAdapter(MagicMock())
+        cls.request_data = {
+            "topics": [
+                {
+                    "id": "topic_id",
+                    "title": "topic_title",
+                    "description": "topic_description",
+                    "language": "en",
+                    "ancestors": [
+                        {
+                            "id": "ancestor_id",
+                            "title": "ancestor_title",
+                            "description": "ancestor_description",
+                        }
+                    ],
+                }
+            ],
+            "metadata": {"channel_id": "00000000000000000000000000000010"},
+        }
+        cls.channel_id = "test_channel_id"
+        cls.resources = [MagicMock(spec=ContentNode)]
+
+        cls.request = EmbedTopicsRequest(
+            method="POST",
+            url="http://test.com",
+            path="/test/path",
+            params={"override_threshold": False},
+            json=cls.request_data,
+        )
+        cls.api_response = BackendResponse(
+            data={
+                "topics": [
+                    {
+                        "id": "abcdef1234567890abcdef1234567890",
+                        "recommendations": [
+                            {
+                                "id": "abcdef1234567890abcdef1234567890",
+                                "channel_id": "abcdef1234567890abcdef1234567890",
+                                "rank": 8,
+                            }
+                        ],
+                    },
+                    {
+                        "id": "1234567890abcdef1234567890abcdef",
+                        "recommendations": [
+                            {
+                                "id": "1234567890abcdef1234567890abcdef",
+                                "channel_id": "1234567890abcdef1234567890abcdef",
+                                "rank": 9,
+                            }
+                        ],
+                    },
+                ]
+            }
+        )
+
+        PublicContentNode.objects.create(
+            id="1234567890abcdef1234567890abcdef",
+            title="Public Content Node 1",
+            content_id=uuid.uuid4().hex,
+            channel_id="ddec09d74e834241a580c480ee37879c",
+        )
+        PublicContentNode.objects.create(
+            id="abcdef1234567890abcdef1234567890",
+            title="Public Content Node 2",
+            content_id=uuid.uuid4().hex,
+            channel_id="84fcaec1e0514b62899d7f436384c401",
+        )
+
+    def assert_backend_call(
+        self,
+        mock_response_exists,
+        response_exists_value,
+        connect_value,
+        make_request_value,
+        method,
+        *args,
+    ):
+        mock_response_exists.return_value = response_exists_value
+        self.adapter.backend.connect.return_value = connect_value
+        self.adapter.backend.make_request.return_value = make_request_value
+
+        if response_exists_value:
+            result = method(*args)
+            mock_response_exists.assert_called_once()
+            self.adapter.backend.connect.assert_not_called()
+            self.adapter.backend.make_request.assert_not_called()
+            return result
+        else:
+            if connect_value:
+                result = method(*args)
+                self.adapter.backend.connect.assert_called_once()
+                self.adapter.backend.make_request.assert_called_once()
+                return result
+            else:
+                with self.assertRaises(errors.ConnectionError):
+                    method(*args)
+                self.adapter.backend.connect.assert_called_once()
+                self.adapter.backend.make_request.assert_not_called()
+
+    def test_adapter_initialization(self):
+        self.assertIsNotNone(self.adapter)
+        self.assertIsInstance(self.adapter, RecommendationsAdapter)
+
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    def test_generate_embeddings_connect_failure(self, mock_response_exists):
+        mock_response = MagicMock(spec=EmbeddingsResponse)
+        self.assert_backend_call(
+            mock_response_exists,
+            None,
+            False,
+            mock_response,
+            self.adapter.generate_embeddings,
+            self.request,
+        )
+
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    def test_generate_embeddings(self, mock_response_exists):
+        mock_response = MagicMock(spec=EmbeddingsResponse)
+        mock_response.error = None
+        response = self.assert_backend_call(
+            mock_response_exists,
+            None,
+            True,
+            mock_response,
+            self.adapter.generate_embeddings,
+            self.request,
+        )
+        self.assertIsInstance(response, EmbeddingsResponse)
+
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    def test_generate_embeddings_failure(self, mock_response_exists):
+        mock_response = MagicMock(spec=EmbeddingsResponse)
+        mock_response.error = {}
+        response = self.assert_backend_call(
+            mock_response_exists,
+            None,
+            True,
+            mock_response,
+            self.adapter.generate_embeddings,
+            self.request,
+        )
+        self.assertIsInstance(response, EmbeddingsResponse)
+        self.assertIsNotNone(response.error)
+
+    def test_response_exists(self):
+        cached = self.adapter.cache_embeddings_request(self.request, self.api_response)
+        self.assertTrue(cached)
+
+        response = self.adapter.response_exists(self.request)
+        self.assertIsNotNone(response)
+        self.assertIsInstance(response, EmbeddingsResponse)
+        self.assertDictEqual(response.data, self.api_response.data)
+
+    def test_response_does_not_exist(self):
+        new_request = EmbedTopicsRequest(
+            method="POST",
+            url="http://test.com",
+            path="/test/path",
+            params={"override_threshold": True},
+            json=[
+                {
+                    "id": "topic_id",
+                    "title": "topic_title",
+                    "description": "topic_description",
+                }
+            ],
+        )
+        response = self.adapter.response_exists(new_request)
+        self.assertIsNone(response)
+
+    def cache_request_test_helper(self, request_json, response_data, expected_count):
+        new_request = copy.deepcopy(self.request)
+        new_request.json = request_json
+
+        result = self.adapter.cache_embeddings_request(new_request, response_data)
+        self.assertTrue(result)
+
+        cached_items = RecommendationsCache.objects.filter(
+            request_hash=self.adapter._generate_request_hash(new_request)
+        )
+        self.assertEqual(cached_items.count(), expected_count)
+
+    def test_cache_embeddings_request_success(self):
+        request_json = {
+            "topics": [
+                {
+                    "id": "topic_id",
+                    "title": "topic_title",
+                    "description": "topic_description",
+                }
+            ],
+            "metadata": {},
+        }
+        self.cache_request_test_helper(request_json, self.api_response, 2)
+
+    def test_cache_embeddings_request_empty_data(self):
+        request_json = {
+            "topics": [
+                {
+                    "id": "topic_id",
+                    "title": "topic_title",
+                    "description": "topic_description",
+                }
+            ],
+            "metadata": {},
+        }
+        self.cache_request_test_helper(request_json, {}, 0)
+
+    def test_cache_embeddings_request_ignore_duplicates(self):
+        request_json = {
+            "topics": [
+                {
+                    "id": "topic_id",
+                    "title": "topic_title",
+                    "description": "topic_description",
+                }
+            ],
+            "metadata": {},
+        }
+        duplicate_data = BackendResponse(
+            data={
+                "topics": [
+                    {
+                        "id": "1234567890abcdef1234567890abcdef",
+                        "recommendations": [
+                            {
+                                "id": "1234567890abcdef1234567890abcdef",
+                                "channel_id": "1234567890abcdef1234567890abcdef",
+                                "rank": 1,
+                            }
+                        ],
+                    },
+                    {
+                        "id": "1234567890abcdef1234567890abcdef",
+                        "recommendations": [
+                            {
+                                "id": "1234567890abcdef1234567890abcdef",
+                                "channel_id": "1234567890abcdef1234567890abcdef",
+                                "rank": 2,
+                            }
+                        ],
+                    },
+                ]
+            }
+        )
+        self.cache_request_test_helper(request_json, duplicate_data, 1)
+
+    def test_cache_embeddings_request_invalid_data(self):
+        invalid_data = BackendResponse(
+            data={
+                "response": [
+                    {"node_id": "1234567890abcdef1234567890abcdee", "rank": 0.6}
+                ]
+            }
+        )
+        self.cache_request_test_helper([{"topic": "new_test_topic_4"}], invalid_data, 0)
+
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.cache_embeddings_request"
+    )
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.generate_embeddings"
+    )
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    @patch("contentcuration.utils.recommendations.EmbedTopicsRequest")
+    def test_get_recommendations_success(
+        self,
+        mock_embed_topics_request,
+        mock_response_exists,
+        mock_generate_embeddings,
+        mock_cache_embeddings_request,
+    ):
+        channel = testdata.channel("Public Channel")
+        channel.public = True
+        channel.save()
+
+        public_node_1 = PublicContentNode.objects.create(
+            id="00000000000000000000000000000003",
+            title="Video 1",
+            content_id=uuid.uuid4().hex,
+            channel_id=channel.id,
+        )
+        public_node_2 = PublicContentNode.objects.create(
+            id="00000000000000000000000000000005",
+            title="Exercise 1",
+            content_id=uuid.uuid4().hex,
+            channel_id=channel.id,
+        )
+
+        response_data = {
+            "topics": [
+                {
+                    "id": "00000000000000000000000000000003",
+                    "recommendations": [
+                        {
+                            "id": "00000000000000000000000000000003",
+                            "channel_id": "00000000000000000000000000000003",
+                            "rank": 10,
+                        }
+                    ],
+                },
+                {
+                    "id": "00000000000000000000000000000005",
+                    "recommendations": [
+                        {
+                            "id": "00000000000000000000000000000005",
+                            "channel_id": "00000000000000000000000000000005",
+                            "rank": 11,
+                        }
+                    ],
+                },
+            ]
+        }
+
+        mock_response_exists.return_value = None
+        mock_response = MagicMock(spec=EmbeddingsResponse)
+        mock_response.data = response_data
+        mock_response.error = None
+        mock_response.get = lambda key, default=None: getattr(
+            mock_response, key, default
+        )
+        mock_generate_embeddings.return_value = mock_response
+
+        response = self.adapter.get_recommendations(self.request_data)
+        results = list(response.results)
+        expected_node_ids = [public_node_1.id, public_node_2.id]
+        actual_node_ids = [result["node_id"] for result in results]
+
+        mock_response_exists.assert_called_once()
+        mock_generate_embeddings.assert_called_once()
+        self.assertIsInstance(response, RecommendationsResponse)
+        self.assertListEqual(expected_node_ids, actual_node_ids)
+        self.assertEqual(len(results), 2)
+
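For orientation, `test_get_recommendations_success` implies the adapter flattens the backend's nested `topics` → `recommendations` payload into a flat result list keyed by `node_id`. Illustratively (key names from the test; everything else is assumed):

    response = adapter.get_recommendations(request_data)
    for result in response.results:
        print(result["node_id"])  # e.g. "00000000000000000000000000000003"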
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter._flatten_response"
+    )
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    @patch("contentcuration.utils.recommendations.EmbedTopicsRequest")
+    def test_get_recommendations_failure(
+        self, mock_embed_topics_request, mock_response_exists, mock_flatten_response
+    ):
+        mock_request_instance = MagicMock(spec=EmbedTopicsRequest)
+        mock_embed_topics_request.return_value = mock_request_instance
+
+        self.assert_backend_call(
+            mock_response_exists,
+            None,
+            False,
+            None,
+            self.adapter.get_recommendations,
+            self.request_data,
+        )
+
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter._flatten_response"
+    )
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    @patch("contentcuration.utils.recommendations.EmbedContentRequest")
+    def test_embed_content_success(
+        self, mock_embed_topics_request, mock_response_exists, mock_flatten_response
+    ):
+        mock_response = MagicMock(spec=EmbeddingsResponse)
+        mock_response.error = None
+        response = self.assert_backend_call(
+            mock_response_exists,
+            None,
+            True,
+            mock_response,
+            self.adapter.embed_content,
+            self.channel_id,
+            self.resources,
+        )
+        self.assertIsInstance(response, bool)
+        self.assertTrue(response)
+
+    @patch(
+        "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists"
+    )
+    @patch("contentcuration.utils.recommendations.EmbedContentRequest")
+    def test_embed_content_failure(
+        self, mock_embed_topics_request, mock_response_exists
+    ):
+        response = self.assert_backend_call(
+            mock_response_exists,
+            None,
+            False,
+            None,
+            self.adapter.embed_content,
+            self.channel_id,
+            self.resources,
+        )
+
+        self.assertIsNone(response)
+
+    def extract_content_test_helper(
+        self, mock_node, file_return_value, expected_result
+    ):
+        with patch(
+            "contentcuration.utils.recommendations.File.objects.filter",
+            return_value=file_return_value,
+        ):
+            result = self.adapter.extract_content(mock_node)
+            self.assertEqual(result, expected_result)
+
+    def test_extract_content(self):
+        mock_node = MagicMock(spec=ContentNode)
+        mock_node.node_id = "1234567890abcdef1234567890abcdef"
+        mock_node.title = "Sample Title"
+        mock_node.description = "Sample Description"
+        mock_node.language.lang_code = "en"
+        mock_node.kind.kind = "video"
+
+        mock_file_instance = MagicMock()
+        mock_file_instance.file_on_disk = "path/to/file.mp4"
+        mock_file_instance.preset_id = "video_high_res"
+        mock_file_instance.language.lang_code = "en"
+
+        expected_result = {
+            "id": "1234567890abcdef1234567890abcdef",
+            "title": "Sample Title",
+            "description": "Sample Description",
+            "text": "",
+            "language": "en",
+            "files": [
+                {
+                    "url": "path/to/file.mp4",
+                    "preset": "video_high_res",
+                    "language": "en",
+                }
+            ],
+        }
+        self.extract_content_test_helper(
+            mock_node, [mock_file_instance], expected_result
+        )
+
+    def test_extract_content_no_files(self):
+        mock_node = MagicMock(spec=ContentNode)
+        mock_node.node_id = "1234567890abcdef1234567890abcdef"
+        mock_node.title = "Sample Title"
+        mock_node.description = "Sample Description"
+        mock_node.language.lang_code = "en"
+        mock_node.kind.kind = "video"
+
+        expected_result = {
+            "id": "1234567890abcdef1234567890abcdef",
+            "title": "Sample Title",
+            "description": "Sample Description",
+            "text": "",
+            "language": "en",
+            "files": [],
+        }
+        self.extract_content_test_helper(mock_node, [], expected_result)
+
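Taken together, the adapter tests above imply a cache-first call sequence: check `response_exists`, only then `connect` and `make_request` via `generate_embeddings`, and store successful responses with `cache_embeddings_request`. A hypothetical outline of that flow (method names come from the tests; the body is an assumption, not the adapter's actual code):

    def fetch_embeddings(adapter, request):
        cached = adapter.response_exists(request)  # RecommendationsCache lookup
        if cached is not None:
            return cached  # backend.connect/make_request are never touched
        response = adapter.generate_embeddings(request)  # raises ConnectionError if connect fails
        if not response.error:
            adapter.cache_embeddings_request(request, response)
        return response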
+
+class RecommendationsBackendFactoryTestCases(TestCase):
+    def setUp(self):
+        self.factory = RecommendationsBackendFactory()
+
+    def test_prepare_url_with_no_scheme(self):
+        url = "example.com:8080"
+        result = self.factory._prepare_url(url)
+        self.assertEqual(result, f"http://{url}")
+
+    def test_prepare_url_with_no_port(self):
+        url = "http://example.com"
+        result = self.factory._prepare_url(url)
+        self.assertEqual(result, f"{url}:8000")
+
+    def test_prepare_url_with_http(self):
+        url = "http://example.com:8080"
+        result = self.factory._prepare_url(url)
+        self.assertEqual(result, url)
+
+    def test_prepare_url_with_https(self):
+        url = "https://example.com:443"
+        result = self.factory._prepare_url(url)
+        self.assertEqual(result, url)
+
+    def test_prepare_url_with_empty_url(self):
+        url = ""
+        result = self.factory._prepare_url(url)
+        self.assertEqual(result, url)
+
+    def test_prepare_url_with_none(self):
+        url = None
+        result = self.factory._prepare_url(url)
+        self.assertEqual(result, url)
+
+    @patch("contentcuration.utils.recommendations.settings")
+    def test_create_backend_with_url_no_scheme(self, mock_settings):
+        mock_settings.CURRICULUM_AUTOMATION_API_URL = "api.example.com"
+        backend = self.factory.create_backend()
+
+        self.assertIsInstance(backend, Recommendations)
+        self.assertEqual(backend.base_url, "http://api.example.com:8000")
+        self.assertEqual(backend.connect_endpoint, "/connect")
+
+    @patch("contentcuration.utils.recommendations.settings")
+    def test_create_backend_with_url_with_scheme(self, mock_settings):
+        mock_settings.CURRICULUM_AUTOMATION_API_URL = "https://api.example.com"
+        backend = self.factory.create_backend()
+
+        self.assertIsInstance(backend, Recommendations)
+        self.assertEqual(backend.base_url, "https://api.example.com:8000")
+        self.assertEqual(backend.connect_endpoint, "/connect")
+
+    @patch("contentcuration.utils.recommendations.settings")
+    def test_create_backend_with_empty_url(self, mock_settings):
+        mock_settings.CURRICULUM_AUTOMATION_API_URL = ""
+        backend = self.factory.create_backend()
+
+        self.assertIsInstance(backend, Recommendations)
+        self.assertEqual(backend.base_url, "")
+        self.assertEqual(backend.connect_endpoint, "/connect")
+
+    @patch("contentcuration.utils.recommendations.settings")
+    def test_create_backend_with_no_url(self, mock_settings):
+        mock_settings.CURRICULUM_AUTOMATION_API_URL = None
+        backend = self.factory.create_backend()
+
+        self.assertIsInstance(backend, Recommendations)
+        self.assertEqual(backend.base_url, None)
+        self.assertEqual(backend.connect_endpoint, "/connect")
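The `_prepare_url` tests fully pin down its behavior: falsy values pass through, a missing scheme defaults to `http://`, and a missing port defaults to `8000`. One implementation consistent with all six cases (a sketch, not necessarily the factory's code; the default-port literal is an assumption):

    from urllib.parse import urlparse

    def _prepare_url(url):
        if not url:
            return url  # "" and None pass through untouched
        if "://" not in url:
            url = f"http://{url}"  # default scheme
        if urlparse(url).port is None:
            url = f"{url}:8000"  # default port
        return url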
@patch.object(generatenodediff_task, 'find_incomplete_ids') + @patch.object(generatenodediff_task, "find_incomplete_ids") def test_get_node_diff__task_processing(self, mock_find_incomplete_ids): qs = Mock(spec="django.db.models.query.QuerySet") mock_find_incomplete_ids.return_value = qs() mock_find_incomplete_ids.return_value.exists.return_value = True pk = self.channel.main_tree.pk - response = self.get(reverse("get_node_diff", kwargs=dict(updated_id=pk, original_id=pk))) + response = self.get( + reverse("get_node_diff", kwargs=dict(updated_id=pk, original_id=pk)) + ) self.assertEqual(response.status_code, 302) @@ -45,7 +53,7 @@ def setUp(self): self.default_details = { "resource_count": 5, "resource_size": 100, - "kind_count": {"document": 3, "video": 2} + "kind_count": {"document": 3, "video": 2}, } # see tree.json for where this comes from self.node = ContentNode.objects.get(node_id="00000000000000000000000000000001") @@ -59,7 +67,11 @@ def tearDown(self): def _set_cache(self, node, last_update=None): data = self.default_details.copy() if last_update is not None: - data.update(last_update=pytz.utc.localize(last_update).strftime(settings.DATE_TIME_FORMAT)) + data.update( + last_update=pytz.utc.localize(last_update).strftime( + settings.DATE_TIME_FORMAT + ) + ) cache_key = "details_{}".format(node.node_id) cache.set(cache_key, json.dumps(data)) @@ -67,9 +79,11 @@ def _set_cache(self, node, last_update=None): @contextmanager def _check_details(self, node=None): endpoint = "get_channel_details" if node is None else "get_node_details" - param = {"channel_id": self.channel.id} \ - if endpoint == "get_channel_details" \ + param = ( + {"channel_id": self.channel.id} + if endpoint == "get_channel_details" else {"node_id": node.id} + ) url = reverse(endpoint, kwargs=param) response = self.get(url) print(response.content) @@ -77,16 +91,16 @@ def _check_details(self, node=None): yield details def assertDetailsEqual(self, details, expected): - self.assertEqual(details['resource_count'], expected['resource_count']) - self.assertEqual(details['resource_size'], expected['resource_size']) - self.assertEqual(details['kind_count'], expected['kind_count']) + self.assertEqual(details["resource_count"], expected["resource_count"]) + self.assertEqual(details["resource_size"], expected["resource_size"]) + self.assertEqual(details["kind_count"], expected["kind_count"]) @patch("contentcuration.models.ContentNode.get_details") def test_get_channel_details__uncached(self, mock_get_details): mock_get_details.return_value = { "resource_count": 7, "resource_size": 200, - "kind_count": {"document": 33, "video": 22} + "kind_count": {"document": 33, "video": 22}, } with self._check_details() as details: self.assertDetailsEqual(details, mock_get_details.return_value) @@ -97,19 +111,25 @@ def test_get_channel_details__uncached(self, mock_get_details): def test_get_channel_details__cached(self, task_mock): # force the cache to update by adding a very old cache entry. Since Celery tasks run sync in the test suite, # get_channel_details will return an updated cache value rather than generate it async. 
- self._set_cache(self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1)) + self._set_cache( + self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1) + ) with self._check_details() as details: # check cache was returned self.assertDetailsEqual(details, self.default_details) # Check that the outdated cache prompts an asynchronous cache update - task_mock.enqueue.assert_called_once_with(self.user, node_id=self.channel.main_tree.id) + task_mock.enqueue.assert_called_once_with( + self.user, node_id=self.channel.main_tree.id + ) @patch("contentcuration.views.nodes.getnodedetails_task") def test_get_channel_details__cached__not_updated__no_enqueue(self, task_mock): # nothing changed, self.channel.main_tree.get_descendants(include_self=False).update(changed=False) - self._set_cache(self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1)) + self._set_cache( + self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1) + ) with self._check_details() as details: # check cache was returned @@ -119,7 +139,9 @@ def test_get_channel_details__cached__not_updated__no_enqueue(self, task_mock): @patch("contentcuration.views.nodes.getnodedetails_task") def test_get_channel_details__cached__no_enqueue(self, task_mock): # test last update handling - self._set_cache(self.channel.main_tree, last_update=datetime.datetime(2099, 1, 1)) + self._set_cache( + self.channel.main_tree, last_update=datetime.datetime(2099, 1, 1) + ) with self._check_details() as details: # check cache was returned @@ -131,7 +153,7 @@ def test_get_node_details__uncached(self, mock_get_details): mock_get_details.return_value = { "resource_count": 7, "resource_size": 200, - "kind_count": {"document": 33, "video": 22} + "kind_count": {"document": 33, "video": 22}, } with self._check_details(node=self.node) as details: self.assertDetailsEqual(details, mock_get_details.return_value) diff --git a/contentcuration/contentcuration/tests/views/test_settings.py b/contentcuration/contentcuration/tests/views/test_settings.py index 7cf3145e95..ed23fb0d70 100644 --- a/contentcuration/contentcuration/tests/views/test_settings.py +++ b/contentcuration/contentcuration/tests/views/test_settings.py @@ -1,15 +1,14 @@ -from mock import mock - -from django.template.loader import render_to_string from django.conf import settings as ccsettings +from django.template.loader import render_to_string +from mock import mock +from contentcuration.forms import StorageRequestForm from contentcuration.tests import testdata from contentcuration.tests.base import StudioAPITestCase from contentcuration.views.settings import StorageSettingsView -from contentcuration.forms import StorageRequestForm -class StorageSettingsViewTestCase(StudioAPITestCase): +class StorageSettingsViewTestCase(StudioAPITestCase): def setUp(self): super(StorageSettingsViewTestCase, self).setUp() self.view = StorageSettingsView() @@ -17,7 +16,7 @@ def setUp(self): self.view.request.user = testdata.user(email="tester@tester.com") def test_storage_request(self): - + with mock.patch("contentcuration.views.settings.send_mail") as send_mail: data = dict( @@ -35,9 +34,9 @@ def test_storage_request(self): uploading_for="uploading_for", organization_type="organization_type", time_constraint="time_constraint", - message="message" + message="message", ) - self.form = StorageRequestForm(data=data) + self.form = StorageRequestForm(data=data) self.assertTrue(self.form.is_valid()) self.view.form_valid(self.form) @@ -47,7 +46,7 @@ def test_storage_request(self): { "data": 
self.form.cleaned_data, "user": self.view.request.user, - "channels": ["channel1", "channel2"] + "channels": ["channel1", "channel2"], }, ) diff --git a/contentcuration/contentcuration/tests/views/test_users.py b/contentcuration/contentcuration/tests/views/test_users.py index 5247bf46b7..a17da93f8a 100644 --- a/contentcuration/contentcuration/tests/views/test_users.py +++ b/contentcuration/contentcuration/tests/views/test_users.py @@ -21,16 +21,18 @@ def setUp(self): self.request = mock.Mock() self.request.method = "POST" self.user = testdata.user(email="tester@tester.com") - self.request.body = json.dumps(dict( - username="tester@tester.com", - password="password", - )) + self.request.body = json.dumps( + dict( + username="tester@tester.com", + password="password", + ) + ) def test_login__not_post(self): self.request.method = "GET" redirect = login(self.request) self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("accounts", redirect['Location']) + self.assertIn("accounts", redirect["Location"]) def test_login__not_found(self): self.user.email = "different@tester.com" @@ -52,22 +54,24 @@ def test_login__success(self, djangologin): redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_login__case_sensitivity(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: self.user.email = "Tester@tester.com" self.user.save() - self.request.body = json.dumps(dict( - username="tester@Tester.com", - password="password", - )) + self.request.body = json.dumps( + dict( + username="tester@Tester.com", + password="password", + ) + ) redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_login__case_sensitivity__multiple(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: @@ -79,27 +83,31 @@ def test_login__case_sensitivity__multiple(self): user2.set_password("tester") user2.save() - self.request.body = json.dumps(dict( - username="tester@tester.com", - password="tester", - )) + self.request.body = json.dumps( + dict( + username="tester@tester.com", + password="tester", + ) + ) redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_login__whitespace(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: - self.request.body = json.dumps(dict( - username="tester@Tester.com ", - password="password", - )) + self.request.body = json.dumps( + dict( + username="tester@Tester.com ", + password="password", + ) + ) redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_after_delete__no_login(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: @@ -161,9 +169,7 @@ def setUp(self): self.user = testdata.user(email="tester@tester.com") self.user.is_active = False self.user.save() - self.kwargs = dict( - activation_key="activation_key" - ) + self.kwargs = dict(activation_key="activation_key") def test_activate(self): 
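# (Annotation, not part of the original patch:) returning the user's email from validate_key simulates following a valid activation link for this user.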
self.view.validate_key.return_value = self.user.email diff --git a/contentcuration/contentcuration/tests/views/test_views_base.py b/contentcuration/contentcuration/tests/views/test_views_base.py index 8bf4b80726..41f4e56e70 100644 --- a/contentcuration/contentcuration/tests/views/test_views_base.py +++ b/contentcuration/contentcuration/tests/views/test_views_base.py @@ -36,8 +36,12 @@ def test_200_get(self): task_name="export-channel", status="QUEUED", ) - CustomTaskMetadata(task_id=task_id, user=self.user, channel_id=self.channel.id).save() - CustomTaskMetadata(task_id=task_id_2, user=self.user, channel_id=channel_2.id).save() + CustomTaskMetadata( + task_id=task_id, user=self.user, channel_id=self.channel.id + ).save() + CustomTaskMetadata( + task_id=task_id_2, user=self.user, channel_id=channel_2.id + ).save() response = self.get( reverse_lazy("publishing_status"), ) @@ -50,6 +54,10 @@ def test_200_get(self): for i, item in enumerate(response.data): self.assertEqual(expected_channel_ids[i], item["channel_id"]) - expected_task_id = task.task_id if item["channel_id"] == self.channel.id else task_2.task_id + expected_task_id = ( + task.task_id + if item["channel_id"] == self.channel.id + else task_2.task_id + ) self.assertEqual(expected_task_id, item["task_id"]) self.assertIn("performed", item) diff --git a/contentcuration/contentcuration/tests/views/test_views_internal.py b/contentcuration/contentcuration/tests/views/test_views_internal.py index 3a8f50a6d2..e43d4fdd75 100644 --- a/contentcuration/contentcuration/tests/views/test_views_internal.py +++ b/contentcuration/contentcuration/tests/views/test_views_internal.py @@ -10,7 +10,9 @@ from django.urls import reverse_lazy from le_utils.constants import content_kinds from le_utils.constants import format_presets -from le_utils.constants.labels.accessibility_categories import ACCESSIBILITYCATEGORIESLIST +from le_utils.constants.labels.accessibility_categories import ( + ACCESSIBILITYCATEGORIESLIST, +) from le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST from le_utils.constants.labels.levels import LEVELSLIST from le_utils.constants.labels.needs import NEEDSLIST @@ -196,9 +198,7 @@ def test_associates_file_with_created_node(self): def test_metadata_properly_created(self): node = ContentNode.objects.get(title="valid_metadata_labels") for label, values in METADATA.items(): - self.assertEqual(getattr(node, label), { - values[0]: True - }) + self.assertEqual(getattr(node, label), {values[0]: True}) @skipIf(True, "Disable until we mark nodes as incomplete rather than just warn") def test_invalid_nodes_are_not_complete(self): @@ -244,7 +244,7 @@ def test_add_nodes__not_a_topic(self): ) # should succeed self.assertEqual(response.status_code, 200, response.content) - resource_node_id = next(iter(response.json().get('root_ids').values())) + resource_node_id = next(iter(response.json().get("root_ids").values())) invalid_child = self._make_node_data() test_data = { @@ -462,7 +462,9 @@ def test_duplicate_assessment_item_returns_400_status_code(self): """ Check that we return 400 if passed in duplicate assessment items. 
""" - self.sample_data["content_data"][0]["questions"][1]["assessment_id"] = self.sample_data["content_data"][0]["questions"][0]["assessment_id"] + self.sample_data["content_data"][0]["questions"][1][ + "assessment_id" + ] = self.sample_data["content_data"][0]["questions"][0]["assessment_id"] response = self._make_request() # check that we returned 400 with that POST request assert response.status_code == 400, "Got a non-400 request error: {}".format( @@ -743,7 +745,7 @@ def setUp(self): "license": None, "source_domain": "unique domain", "source_id": "unique domain root", - "ricecooker_version": '0.6.46', + "ricecooker_version": "0.6.46", "extra_fields": None, "files": None, } @@ -751,7 +753,9 @@ def setUp(self): def test_401_no_permission(self): client = APIClient() response = client.post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) self.assertEqual(response.status_code, 401) @@ -761,16 +765,22 @@ def test_returns_200_status_code(self): """ # check that we returned 200 with that POST request resp = self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", + ) + self.assertEqual( + resp.status_code, 200, "Got a request error: {}".format(resp.content) ) - self.assertEqual(resp.status_code, 200, "Got a request error: {}".format(resp.content)) def test_creates_channel(self): """ Test that it creates a channel with the given id """ self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: Channel.objects.get(id=self.channel_data["id"]) @@ -786,7 +796,9 @@ def test_updates_already_created_channel(self): deleted_channel.save(actor_id=self.user.id) self.channel_data.update({"name": "Updated name", "id": deleted_channel.id}) self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: c = Channel.objects.get(id=self.channel_data["id"]) @@ -799,7 +811,9 @@ def test_creates_cheftree(self): Test that it creates a channel with the given id """ self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: c = Channel.objects.get(id=self.channel_data["id"]) @@ -813,18 +827,22 @@ def test_associates_file_with_created_channel(self): Check that the file we passed is now associated with the chef_tree we just created. 
""" - dummy_file = create_studio_file(b"aaaaaaaaaaaaaaa", preset=format_presets.HTML5_ZIP, ext="zip") + dummy_file = create_studio_file( + b"aaaaaaaaaaaaaaa", preset=format_presets.HTML5_ZIP, ext="zip" + ) test_file = { - 'size': len(dummy_file["data"]), - 'preset': format_presets.HTML5_ZIP, - 'filename': dummy_file["name"], - 'original_filename': 'test_file', - 'language': "as", - 'source_url': "https://justatest.com/test_file.zip", + "size": len(dummy_file["data"]), + "preset": format_presets.HTML5_ZIP, + "filename": dummy_file["name"], + "original_filename": "test_file", + "language": "as", + "source_url": "https://justatest.com/test_file.zip", } self.channel_data.update({"files": [test_file]}) self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: @@ -838,9 +856,13 @@ def test_associates_extra_fields_with_root_node(self): """ Check that extra_fields information is put on the chef_tree root node """ - self.channel_data.update({"extra_fields": json.dumps({"modality": "CUSTOM_NAVIGATION"})}) + self.channel_data.update( + {"extra_fields": json.dumps({"modality": "CUSTOM_NAVIGATION"})} + ) self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: @@ -895,13 +917,19 @@ def _make_node_data(self): def setUp(self): super(ApiAddRemoteNodesToTreeTestCase, self).setUp() self.source_channel = channel() - self.source_video = self.source_channel.main_tree.get_descendants().filter(kind_id=content_kinds.VIDEO).first() + self.source_video = ( + self.source_channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.VIDEO) + .first() + ) # first setup a test channel... self.channel = channel() self.root_node = self.channel.main_tree - temp_file_dict = create_studio_file(thumbnail_bytes, preset=format_presets.VIDEO_THUMBNAIL, ext='jpg') + temp_file_dict = create_studio_file( + thumbnail_bytes, preset=format_presets.VIDEO_THUMBNAIL, ext="jpg" + ) # File used for every node self.fileobj = temp_file_dict["db_file"] @@ -983,9 +1011,7 @@ def test_metadata_properly_created(self): node = ContentNode.objects.get(title="valid_metadata_labels") for label, values in METADATA.items(): - self.assertEqual(getattr(node, label), { - values[0]: True - }) + self.assertEqual(getattr(node, label), {values[0]: True}) def test_metadata_properly_screened_viewer(self): self.root_node.get_descendants().delete() @@ -1012,7 +1038,10 @@ def test_metadata_properly_screened_viewer(self): if key not in METADATA: if hasattr(node, key): # These will be matching even though we don't overwrite them. - if key in ALLOWED_OVERRIDES or key in {"source_channel_id", "source_node_id"}: + if key in ALLOWED_OVERRIDES or key in { + "source_channel_id", + "source_node_id", + }: self.assertEqual(getattr(node, key), value, key) else: self.assertNotEqual(getattr(node, key), value, key) @@ -1028,7 +1057,10 @@ def test_metadata_properly_screened_editor(self): if key not in METADATA: if hasattr(node, key): # These will be matching even though we don't overwrite them. 
- if key in EDIT_ALLOWED_OVERRIDES or key in {"source_channel_id", "source_node_id"}: + if key in EDIT_ALLOWED_OVERRIDES or key in { + "source_channel_id", + "source_node_id", + }: self.assertEqual(getattr(node, key), value, key) else: self.assertNotEqual(getattr(node, key), value, key) diff --git a/contentcuration/contentcuration/tests/viewsets/base.py b/contentcuration/contentcuration/tests/viewsets/base.py index 97f5cb52f7..617d23bb26 100644 --- a/contentcuration/contentcuration/tests/viewsets/base.py +++ b/contentcuration/contentcuration/tests/viewsets/base.py @@ -8,13 +8,30 @@ from contentcuration.viewsets.sync.constants import CHANNEL from contentcuration.viewsets.sync.constants import SYNCED from contentcuration.viewsets.sync.utils import _generate_event as base_generate_event -from contentcuration.viewsets.sync.utils import generate_copy_event as base_generate_copy_event -from contentcuration.viewsets.sync.utils import generate_create_event as base_generate_create_event -from contentcuration.viewsets.sync.utils import generate_delete_event as base_generate_delete_event -from contentcuration.viewsets.sync.utils import generate_deploy_event as base_generate_deploy_event -from contentcuration.viewsets.sync.utils import generate_publish_event as base_generate_publish_event -from contentcuration.viewsets.sync.utils import generate_update_event as base_generate_update_event -from contentcuration.viewsets.sync.utils import generate_update_descendants_event as base_generate_update_descendants_event +from contentcuration.viewsets.sync.utils import ( + generate_copy_event as base_generate_copy_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_create_event as base_generate_create_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_delete_event as base_generate_delete_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_deploy_event as base_generate_deploy_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_publish_event as base_generate_publish_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_publish_next_event as base_generate_publish_next_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_update_descendants_event as base_generate_update_descendants_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_update_event as base_generate_update_event, +) def generate_copy_event(*args, **kwargs): @@ -41,8 +58,16 @@ def generate_update_event(*args, **kwargs): return event -def generate_sync_channel_event(channel_id, titles_and_descriptions, resource_details, files, assessment_items): - event = base_generate_event(key=channel_id, table=CHANNEL, event_type=SYNCED, channel_id=channel_id, user_id=None) +def generate_sync_channel_event( + channel_id, titles_and_descriptions, resource_details, files, assessment_items +): + event = base_generate_event( + key=channel_id, + table=CHANNEL, + event_type=SYNCED, + channel_id=channel_id, + user_id=None, + ) event["rev"] = random.randint(1, 10000000) event["titles_and_descriptions"] = titles_and_descriptions event["resource_details"] = resource_details @@ -56,17 +81,25 @@ def generate_deploy_channel_event(channel_id, user_id): event["rev"] = random.randint(1, 10000000) return event + def generate_update_descendants_event(*args, **kwargs): event = base_generate_update_descendants_event(*args, **kwargs) event["rev"] = random.randint(1, 10000000) return event + def generate_publish_channel_event(channel_id): event = 
base_generate_publish_event(channel_id) event["rev"] = random.randint(1, 10000000) return event +def generate_publish_next_event(channel_id): + event = base_generate_publish_next_event(channel_id) + event["rev"] = random.randint(1, 10000000) + return event + + class SyncTestMixin(object): celery_task_always_eager = None @@ -103,4 +136,6 @@ def sync_changes(self, changes): ) def get_allowed_changes(self, response): - return Change.objects.filter(server_rev__in=[c['server_rev'] for c in response.json()["allowed"]]) + return Change.objects.filter( + server_rev__in=[c["server_rev"] for c in response.json()["allowed"]] + ) diff --git a/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py b/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py index aff73eeb38..56f85c5799 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py +++ b/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import json import uuid @@ -18,11 +16,9 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def assessmentitem_metadata(self): return { - "assessment_id": uuid.uuid4().hex, "contentnode": self.channel.main_tree.get_descendants() .filter(kind_id=content_kinds.EXERCISE) @@ -97,7 +93,7 @@ def test_create_assessmentitem_no_node_permission(self): except models.AssessmentItem.DoesNotExist: pass - def test_create_assessmentitem_with_file_question(self): + def test_create_assessmentitem_with_incorrect_file_placeholder_in_question(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata image_file = testdata.fileobj_exercise_image() @@ -107,6 +103,43 @@ def test_create_assessmentitem_with_file_question(self): exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id ) + assessmentitem["question"] = question + response = self.sync_changes( + [ + generate_create_event( + [assessmentitem["contentnode"], assessmentitem["assessment_id"]], + ASSESSMENTITEM, + assessmentitem, + channel_id=self.channel.id, + ) + ], + ) + self.assertEqual(response.status_code, 200, response.content) + try: + ai = models.AssessmentItem.objects.get( + assessment_id=assessmentitem["assessment_id"] + ) + except models.AssessmentItem.DoesNotExist: + self.fail("AssessmentItem was not created") + try: + file = ai.files.get() + self.assertEqual(file.id, image_file.id) + self.fail("File was updated") + except models.File.DoesNotExist: + pass + + def test_create_assessmentitem_with_file_question(self): + self.client.force_authenticate(user=self.user) + assessmentitem = self.assessmentitem_metadata + image_file = testdata.fileobj_exercise_image() + image_file.uploaded_by = self.user + image_file.save() + question = "![alt_text](${}/{}.{})".format( + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + image_file.file_format_id, + ) + assessmentitem["question"] = question response = self.sync_changes( [ @@ -131,6 +164,74 @@ def test_create_assessmentitem_with_file_question(self): except models.File.DoesNotExist: self.fail("File was not updated") + def test_create_assessmentitem_with_file_in_question_no_file_object(self): + self.client.force_authenticate(user=self.user) + assessmentitem = self.assessmentitem_metadata + image_file = testdata.fileobj_exercise_image() + image_file.uploaded_by = self.user + image_file.save() + question = "![alt_text](${}/{}.{})".format( + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + 
image_file.file_format_id, + ) + + image_file.delete() + + assessmentitem["question"] = question + response = self.sync_changes( + [ + generate_create_event( + [assessmentitem["contentnode"], assessmentitem["assessment_id"]], + ASSESSMENTITEM, + assessmentitem, + channel_id=self.channel.id, + ) + ], + ) + self.assertEqual(response.status_code, 200, response.content) + try: + ai = models.AssessmentItem.objects.get( + assessment_id=assessmentitem["assessment_id"] + ) + except models.AssessmentItem.DoesNotExist: + self.fail("AssessmentItem was not created") + try: + file = ai.files.get() + self.assertEqual(file.assessment_item_id, ai.id) + except models.File.DoesNotExist: + self.fail("File was not created") + + def test_create_assessmentitem_with_file_in_question_no_file_uploaded(self): + self.client.force_authenticate(user=self.user) + assessmentitem = self.assessmentitem_metadata + question = "![alt_text](${}/{}.{})".format( + exercises.CONTENT_STORAGE_PLACEHOLDER, + "123456789012345678901234567890ab", + "jpg", + ) + + assessmentitem["question"] = question + response = self.sync_changes( + [ + generate_create_event( + [assessmentitem["contentnode"], assessmentitem["assessment_id"]], + ASSESSMENTITEM, + assessmentitem, + channel_id=self.channel.id, + ) + ], + ) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual(len(response.data["errors"]), 1) + try: + models.AssessmentItem.objects.get( + assessment_id=assessmentitem["assessment_id"] + ) + self.fail("AssessmentItem was created") + except models.AssessmentItem.DoesNotExist: + pass + def test_create_assessmentitem_with_file_answers(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata @@ -138,10 +239,12 @@ def test_create_assessmentitem_with_file_answers(self): image_file.uploaded_by = self.user image_file.save() answer = "![alt_text](${}/{}.{})".format( - exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + image_file.file_format_id, ) - answers = [{'answer': answer, 'correct': False, 'order': 1}] + answers = [{"answer": answer, "correct": False, "order": 1}] assessmentitem["answers"] = json.dumps(answers) @@ -175,7 +278,9 @@ def test_create_assessmentitem_with_file_hints(self): image_file.uploaded_by = self.user image_file.save() hint = "![alt_text](${}/{}.{})".format( - exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + image_file.file_format_id, ) hints = [ {"hint": hint, "order": 1}, @@ -213,7 +318,9 @@ def test_create_assessmentitem_with_file_no_permission(self): assessmentitem = self.assessmentitem_metadata image_file = testdata.fileobj_exercise_image() question = "![alt_text](${}/{}.{})".format( - exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + image_file.file_format_id, ) assessmentitem["question"] = question response = self.sync_changes( @@ -227,14 +334,17 @@ def test_create_assessmentitem_with_file_no_permission(self): ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.json()["errors"]), 1) try: - models.AssessmentItem.objects.get( + ai = models.AssessmentItem.objects.get( assessment_id=assessmentitem["assessment_id"] ) - self.fail("AssessmentItem was created") except models.AssessmentItem.DoesNotExist: - pass + self.fail("AssessmentItem was not 
created") + try: + file = ai.files.get() + self.assertEqual(file.assessment_item_id, ai.id) + except models.File.DoesNotExist: + self.fail("File was not created") self.assertIsNone(image_file.assessment_item) @@ -297,18 +407,52 @@ def test_update_assessmentitem(self): new_question, ) + def test_update_assessmentitem_to_true_false(self): + + assessmentitem = models.AssessmentItem.objects.create( + **self.assessmentitem_db_metadata + ) + new_answers = json.dumps( + [ + {"answer": "True", "correct": True, "order": 1}, + {"answer": "False", "correct": False, "order": 2}, + ] + ) + + self.client.force_authenticate(user=self.user) + response = self.sync_changes( + [ + generate_update_event( + [assessmentitem.contentnode_id, assessmentitem.assessment_id], + ASSESSMENTITEM, + {"type": "true_false", "answers": new_answers}, + channel_id=self.channel.id, + ) + ], + ) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual( + models.AssessmentItem.objects.get(id=assessmentitem.id).answers, + new_answers, + ) + self.assertEqual( + models.AssessmentItem.objects.get(id=assessmentitem.id).type, + "true_false", + ) + def test_attempt_update_missing_assessmentitem(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( [ - generate_update_event([ - self.channel.main_tree.get_descendants() - .filter(kind_id=content_kinds.EXERCISE) - .first() - .id, - uuid.uuid4().hex - ], + generate_update_event( + [ + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + .id, + uuid.uuid4().hex, + ], ASSESSMENTITEM, {"question": "but why is it missing in the first place?"}, channel_id=self.channel.id, @@ -327,7 +471,9 @@ def test_update_assessmentitem_with_file(self): image_file.uploaded_by = self.user image_file.save() question = "![alt_text](${}/{}.{})".format( - exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + image_file.file_format_id, ) self.client.force_authenticate(user=self.user) @@ -355,7 +501,9 @@ def test_update_assessmentitem_with_file_no_permissions(self): ) image_file = testdata.fileobj_exercise_image() question = "![alt_text](${}/{}.{})".format( - exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id + exercises.CONTENT_STORAGE_PLACEHOLDER, + image_file.checksum, + image_file.file_format_id, ) self.client.force_authenticate(user=self.user) @@ -370,13 +518,13 @@ def test_update_assessmentitem_with_file_no_permissions(self): ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.json()["errors"]), 1) try: file = assessmentitem.files.get() - self.assertNotEqual(file.id, image_file.id) - self.fail("File was updated") + self.assertEqual(file.assessment_item_id, assessmentitem.id) except models.File.DoesNotExist: - pass + self.fail("File was not created") + + self.assertIsNone(image_file.assessment_item) def test_update_assessmentitem_remove_file(self): @@ -542,7 +690,12 @@ def test_delete_assessmentitems(self): def test_valid_hints_assessmentitem(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata - assessmentitem["hints"] = json.dumps([{'hint': 'asdasdwdqasd', 'order': 1}, {'hint': 'testing the hint', 'order': 2}]) + assessmentitem["hints"] = json.dumps( + [ + {"hint": "asdasdwdqasd", "order": 1}, + {"hint": "testing the hint", "order": 2}, + ] + ) response = self.sync_changes( [ generate_create_event( @@ -578,10 
+731,15 @@ def test_invalid_hints_assessmentitem(self): ) self.assertEqual(response.json()["errors"][0]["table"], "assessmentitem") - self.assertEqual(response.json()["errors"][0]["errors"]["hints"][0], "JSON Data Invalid for hints") + self.assertEqual( + response.json()["errors"][0]["errors"]["hints"][0], + "JSON Data Invalid for hints", + ) self.assertEqual(len(response.json()["errors"]), 1) - with self.assertRaises(models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created"): + with self.assertRaises( + models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created" + ): models.AssessmentItem.objects.get( assessment_id=assessmentitem["assessment_id"] ) @@ -589,10 +747,13 @@ def test_invalid_hints_assessmentitem(self): def test_valid_answers_assessmentitem(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata - assessmentitem["answers"] = json.dumps([{'answer': 'test answer 1 :)', 'correct': False, 'order': 1}, - {'answer': 'test answer 2 :)', 'correct': False, 'order': 2}, - {'answer': 'test answer 3 :)', 'correct': True, 'order': 3} - ]) + assessmentitem["answers"] = json.dumps( + [ + {"answer": "test answer 1 :)", "correct": False, "order": 1}, + {"answer": "test answer 2 :)", "correct": False, "order": 2}, + {"answer": "test answer 3 :)", "correct": True, "order": 3}, + ] + ) response = self.sync_changes( [ generate_create_event( @@ -628,10 +789,15 @@ def test_invalid_answers_assessmentitem(self): ) self.assertEqual(response.json()["errors"][0]["table"], "assessmentitem") - self.assertEqual(response.json()["errors"][0]["errors"]["answers"][0], "JSON Data Invalid for answers") + self.assertEqual( + response.json()["errors"][0]["errors"]["answers"][0], + "JSON Data Invalid for answers", + ) self.assertEqual(len(response.json()["errors"]), 1) - with self.assertRaises(models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created"): + with self.assertRaises( + models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created" + ): models.AssessmentItem.objects.get( assessment_id=assessmentitem["assessment_id"] ) @@ -668,7 +834,9 @@ def test_create_assessmentitem(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata response = self.client.post( - reverse("assessmentitem-list"), assessmentitem, format="json", + reverse("assessmentitem-list"), + assessmentitem, + format="json", ) self.assertEqual(response.status_code, 405, response.content) @@ -709,7 +877,8 @@ def setUp(self): def _get_assessmentitem_metadata(self, assessment_id=None, contentnode_id=None): return { "assessment_id": assessment_id or uuid.uuid4().hex, - "contentnode_id": contentnode_id or self.channel.main_tree.get_descendants() + "contentnode_id": contentnode_id + or self.channel.main_tree.get_descendants() .filter(kind_id=content_kinds.EXERCISE) .first() .id, @@ -752,69 +921,133 @@ def _delete_assessmentitem(self, assessmentitem): def test_content_id__same_on_copy(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Assert after copying content_id is same. 
assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__changes_on_new_assessmentitem(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Create a new assessmentitem. - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id)) + self._create_assessmentitem( + self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ) + ) # Assert after creating a new assessmentitem on copied node, its content_id should change. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__changes_on_deleting_assessmentitem(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Delete an already present assessmentitem from copied contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ) + ) # Assert after deleting assessmentitem on copied node, its content_id should change. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__changes_on_updating_assessmentitem(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Update an already present assessmentitem from copied contentnode.
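# (Annotation, not part of the original patch:) editing the copied node's assessment item should force its content_id to diverge from the original node's.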
- assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id), - {"question": "New Question!"}) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ), + {"question": "New Question!"}, + ) # Assert after updating assessmentitem on copied node, its content_id should change. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__doesnot_changes_of_original_node(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) assessmentitem_node.copy_to(target=self.channel.main_tree) content_id_before_updates = assessmentitem_node.content_id # Create, update and delete assessmentitems from original contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node.id), - {"question": "New Question!"}) - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node.id)) - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node.id + ), + {"question": "New Question!"}, + ) + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node.id + ) + ) + self._create_assessmentitem( + self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node.id) + ) # Assert content_id before and after updates remains the same. assessmentitem_node.refresh_from_db() @@ -823,25 +1056,59 @@ def test_content_id__doesnot_changes_of_original_node(self): def test_content_id__doesnot_changes_if_already_unique(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Create, update and delete assessmentitems of copied contentnode.
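# (Annotation, not part of the original patch:) the first round of create/update/delete below makes the copy's content_id unique; the second, identical round should then leave it unchanged.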
- assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id), - {"question": "New Question!"}) - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id)) - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ), + {"question": "New Question!"}, + ) + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ) + ) + self._create_assessmentitem( + self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ) + ) assessmentitem_node_copy.refresh_from_db() content_id_after_first_update = assessmentitem_node_copy.content_id # Once again, let us create, update and delete assessmentitems of copied contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id), - {"question": "New Question!"}) - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id)) - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ), + {"question": "New Question!"}, + ) + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ) + ) + self._create_assessmentitem( + self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ) + ) assessmentitem_node_copy.refresh_from_db() content_id_after_second_update = assessmentitem_node_copy.content_id diff --git a/contentcuration/contentcuration/tests/viewsets/test_bookmark.py b/contentcuration/contentcuration/tests/viewsets/test_bookmark.py index 04d53cd756..815c14de56 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_bookmark.py +++ b/contentcuration/contentcuration/tests/viewsets/test_bookmark.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from django.urls import reverse from contentcuration import models @@ -12,7 +10,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def bookmark_metadata(self): return { @@ -120,9 +117,7 @@ def test_delete_bookmarks(self): ) data2 = self.bookmark_db_metadata data2["channel_id"] = self.channel2.id - bookmark2 = models.Channel.bookmarked_by.through.objects.create( - **data2 - ) + bookmark2 = models.Channel.bookmarked_by.through.objects.create(**data2) self.client.force_authenticate(user=self.user) response = self.sync_changes( @@ -177,7 +172,9 @@ def test_create_bookmark(self): self.client.force_authenticate(user=self.user) bookmark = self.bookmark_metadata response = 
self.client.post( - reverse("bookmark-list"), bookmark, format="json", + reverse("bookmark-list"), + bookmark, + format="json", ) self.assertEqual(response.status_code, 405, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_channel.py b/contentcuration/contentcuration/tests/viewsets/test_channel.py index 17549ab128..8309f47c8c 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_channel.py +++ b/contentcuration/contentcuration/tests/viewsets/test_channel.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import uuid import mock @@ -8,6 +6,7 @@ from django.urls import reverse from kolibri_public.models import ContentNode as PublicContentNode from le_utils.constants import content_kinds +from mock import patch from contentcuration import models from contentcuration import models as cc @@ -19,6 +18,7 @@ from contentcuration.tests.viewsets.base import generate_delete_event from contentcuration.tests.viewsets.base import generate_deploy_channel_event from contentcuration.tests.viewsets.base import generate_publish_channel_event +from contentcuration.tests.viewsets.base import generate_publish_next_event from contentcuration.tests.viewsets.base import generate_sync_channel_event from contentcuration.tests.viewsets.base import generate_update_event from contentcuration.tests.viewsets.base import SyncTestMixin @@ -27,6 +27,16 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): + @classmethod + def setUpClass(cls): + super(SyncTestCase, cls).setUpClass() + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") + cls.mock_save_export = cls.patch_copy_db.start() + + @classmethod + def tearDownClass(cls): + super(SyncTestCase, cls).tearDownClass() + cls.patch_copy_db.stop() @property def channel_metadata(self): @@ -41,7 +51,11 @@ def test_create_channel(self): self.client.force_authenticate(user=user) channel = self.channel_metadata response = self.sync_changes( - [generate_create_event(channel["id"], CHANNEL, channel, channel_id=channel["id"])] + [ + generate_create_event( + channel["id"], CHANNEL, channel, channel_id=channel["id"] + ) + ] ) self.assertEqual(response.status_code, 200, response.content) try: @@ -56,8 +70,12 @@ def test_create_channels(self): channel2 = self.channel_metadata response = self.sync_changes( [ - generate_create_event(channel1["id"], CHANNEL, channel1, channel_id=channel1["id"]), - generate_create_event(channel2["id"], CHANNEL, channel2, channel_id=channel2["id"]), + generate_create_event( + channel1["id"], CHANNEL, channel1, channel_id=channel1["id"] + ), + generate_create_event( + channel2["id"], CHANNEL, channel2, channel_id=channel2["id"] + ), ] ) self.assertEqual(response.status_code, 200, response.content) @@ -73,63 +91,95 @@ def test_create_channels(self): def test_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id)] + [ + generate_update_event( + channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id + ) + ] ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(models.Channel.objects.get(id=channel.id).name, new_name) def 
test_update_channel_thumbnail_encoding(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) new_encoding = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfQA" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, { - "thumbnail_encoding.base64": new_encoding, - "thumbnail_encoding.orientation": 1, - "thumbnail_encoding.scale": 0.73602189113443, - "thumbnail_encoding.startX": -96.66631072431669, - "thumbnail_encoding.startY": -335.58116356397636, - }, channel_id=channel.id)] + [ + generate_update_event( + channel.id, + CHANNEL, + { + "thumbnail_encoding.base64": new_encoding, + "thumbnail_encoding.orientation": 1, + "thumbnail_encoding.scale": 0.73602189113443, + "thumbnail_encoding.startX": -96.66631072431669, + "thumbnail_encoding.startY": -335.58116356397636, + }, + channel_id=channel.id, + ) + ] ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(models.Channel.objects.get(id=channel.id).thumbnail_encoding["base64"], new_encoding) + self.assertEqual( + models.Channel.objects.get(id=channel.id).thumbnail_encoding["base64"], + new_encoding, + ) def test_cannot_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id)], + [ + generate_update_event( + channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id + ) + ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) self.assertNotEqual(models.Channel.objects.get(id=channel.id).name, new_name) def test_viewer_cannot_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.viewers.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id)], + [ + generate_update_event( + channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id + ) + ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) self.assertNotEqual(models.Channel.objects.get(id=channel.id).name, new_name) def test_update_channel_defaults(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) author = "This is not the old author" @@ -137,7 +187,10 @@ def test_update_channel_defaults(self): response = self.sync_changes( [ generate_update_event( - channel.id, CHANNEL, {"content_defaults.author": author}, channel_id=channel.id + channel.id, + CHANNEL, + {"content_defaults.author": author}, + channel_id=channel.id, ) ] ) @@ -152,7 +205,10 @@ def test_update_channel_defaults(self): response = self.sync_changes( [ generate_update_event( - channel.id, CHANNEL, {"content_defaults.aggregator": aggregator}, 
channel_id=channel.id + channel.id, + CHANNEL, + {"content_defaults.aggregator": aggregator}, + channel_id=channel.id, ) ] ) @@ -167,17 +223,25 @@ def test_update_channel_defaults(self): def test_update_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel2.editors.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( [ - generate_update_event(channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id), - generate_update_event(channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id), + generate_update_event( + channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id + ), + generate_update_event( + channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id + ), ] ) self.assertEqual(response.status_code, 200, response.content) @@ -186,16 +250,24 @@ def test_update_channels(self): def test_cannot_update_some_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( [ - generate_update_event(channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id), - generate_update_event(channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id), + generate_update_event( + channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id + ), + generate_update_event( + channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id + ), ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) @@ -204,17 +276,25 @@ def test_cannot_update_some_channels(self): def test_viewer_cannot_update_some_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel2.viewers.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( [ - generate_update_event(channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id), - generate_update_event(channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id), + generate_update_event( + channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id + ), + generate_update_event( + channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id + ), ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) @@ -223,11 +303,15 @@ def test_viewer_cannot_update_some_channels(self): def test_delete_channel(self): user = testdata.user() - channel = 
models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) self.client.force_authenticate(user=user) - response = self.sync_changes([generate_delete_event(channel.id, CHANNEL, channel_id=channel.id)]) + response = self.sync_changes( + [generate_delete_event(channel.id, CHANNEL, channel_id=channel.id)] + ) self.assertEqual(response.status_code, 200, response.content) channel = models.Channel.objects.get(id=channel.id) self.assertTrue(channel.deleted) @@ -235,7 +319,9 @@ def test_delete_channel(self): def test_cannot_delete_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) self.client.force_authenticate(user=user) response = self.sync_changes( @@ -251,10 +337,14 @@ def test_cannot_delete_channel(self): def test_delete_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel2.editors.add(user) self.client.force_authenticate(user=user) @@ -270,9 +360,13 @@ def test_delete_channels(self): def test_cannot_delete_some_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) self.client.force_authenticate(user=user) response = self.sync_changes( @@ -301,11 +395,7 @@ def test_sync_channel_called_correctly(self, sync_channel_mock): args = [channel.id, False, False, False, False] args[i] = True - response = self.sync_changes( - [ - generate_sync_channel_event(*args) - ] - ) + response = self.sync_changes([generate_sync_channel_event(*args)]) self.assertEqual(response.status_code, 200) sync_channel_mock.assert_called_once() @@ -315,9 +405,7 @@ def test_deploy_channel_event(self): channel = testdata.channel() user = testdata.user() channel.editors.add(user) - self.client.force_authenticate( - user - ) # This will skip all authentication checks + self.client.force_authenticate(user) # This will skip all authentication checks channel.main_tree.refresh_from_db() channel.staging_tree = cc.ContentNode( @@ -337,10 +425,8 @@ def test_deploy_channel_event(self): self.contentnode = cc.ContentNode.objects.create(kind_id="video") response = self.sync_changes( - [ - generate_deploy_channel_event(channel.id, user.id) - ] - ) + [generate_deploy_channel_event(channel.id, user.id)] + ) self.assertEqual(response.status_code, 200) modified_channel = models.Channel.objects.get(id=channel.id) @@ -352,9 +438,7 @@ def test_deploy_with_staging_tree_None(self): channel = testdata.channel() user = testdata.user() channel.editors.add(user) - self.client.force_authenticate( - user - ) # This will skip all authentication checks + self.client.force_authenticate(user) # This will skip all authentication checks 
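# (Annotation, not part of the original patch:) staging_tree is deliberately left as None below, so the deploy event is expected to fail validation rather than deploy anything.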
channel.main_tree.refresh_from_db() channel.staging_tree = None @@ -370,10 +454,8 @@ def test_deploy_with_staging_tree_None(self): self.contentnode = cc.ContentNode.objects.create(kind_id="video") response = self.sync_changes( - [ - generate_deploy_channel_event(channel.id, user.id) - ] - ) + [generate_deploy_channel_event(channel.id, user.id)] + ) # Should raise validation error as staging tree was set to NONE self.assertEqual(len(response.json()["errors"]), 1, response.content) modified_channel = models.Channel.objects.get(id=channel.id) @@ -382,17 +464,59 @@ def test_deploy_with_staging_tree_None(self): def test_publish_does_not_make_publishable(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) - self.sync_changes( - [ - generate_publish_channel_event(channel.id) - ] - ) + self.sync_changes([generate_publish_channel_event(channel.id)]) self.assertEqual(_unpublished_changes_query(channel).count(), 0) + def test_publish_next(self): + channel = testdata.channel() + user = testdata.user() + channel.editors.add(user) + self.client.force_authenticate(user) # This will skip all authentication checks + + channel.staging_tree = testdata.tree() + node = testdata.node({"kind_id": "video", "title": "title", "children": []}) + node.complete = True + node.parent = channel.staging_tree + node.save() + channel.staging_tree.save() + channel.save() + self.assertEqual(channel.staging_tree.published, False) + + response = self.sync_changes([generate_publish_next_event(channel.id)]) + + self.assertEqual(response.status_code, 200) + modified_channel = models.Channel.objects.get(id=channel.id) + self.assertEqual(modified_channel.staging_tree.published, True) + + def test_publish_next_with_incomplete_staging_tree(self): + channel = testdata.channel() + user = testdata.user() + channel.editors.add(user) + self.client.force_authenticate(user) # This will skip all authentication checks + + channel.staging_tree = cc.ContentNode( + kind_id=content_kinds.TOPIC, title="test", node_id="aaa" + ) + channel.staging_tree.save() + channel.save() + self.assertEqual(channel.staging_tree.published, False) + + response = self.sync_changes([generate_publish_next_event(channel.id)]) + + self.assertEqual(response.status_code, 200) + self.assertTrue( + "Channel is not ready to be published" + in response.json()["errors"][0]["errors"][0] + ) + modified_channel = models.Channel.objects.get(id=channel.id) + self.assertEqual(modified_channel.staging_tree.published, False) + class CRUDTestCase(StudioAPITestCase): @property @@ -405,12 +529,15 @@ def channel_metadata(self): def test_fetch_channel_for_admin(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) user.is_admin = True user.save() self.client.force_authenticate(user=user) response = self.client.get( - reverse("channel-detail", kwargs={"pk": channel.id}), format="json", + reverse("channel-detail", kwargs={"pk": channel.id}), + format="json", ) self.assertEqual(response.status_code, 200, response.content) @@ -422,7 +549,8 @@ def test_fetch_admin_channels_invalid_filter(self): user.save() self.client.force_authenticate(user=user) response = self.client.get( - reverse("admin-channels-list") + "?public=true&page_size=25&edit=true", format="json", + 
reverse("admin-channels-list") + "?public=true&page_size=25&edit=true", + format="json", ) self.assertEqual(response.status_code, 200, response.content) @@ -430,7 +558,11 @@ def test_create_channel(self): user = testdata.user() self.client.force_authenticate(user=user) channel = self.channel_metadata - response = self.client.post(reverse("channel-list"), channel, format="json",) + response = self.client.post( + reverse("channel-list"), + channel, + format="json", + ) self.assertEqual(response.status_code, 201, response.content) try: models.Channel.objects.get(id=channel["id"]) @@ -439,7 +571,9 @@ def test_create_channel(self): def test_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) new_name = "This is not the old name" @@ -453,7 +587,9 @@ def test_update_channel(self): def test_delete_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) self.client.force_authenticate(user=user) @@ -470,7 +606,9 @@ def test_admin_restore_channel(self): user.is_admin = True user.is_staff = True user.save() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) channel.deleted = True channel.save(actor_id=user.id) @@ -484,16 +622,31 @@ def test_admin_restore_channel(self): self.assertEqual(response.status_code, 200, response.content) channel = models.Channel.objects.get(id=channel.id) self.assertFalse(channel.deleted) - self.assertEqual(1, channel.history.filter(actor=user, action=channel_history.RECOVERY).count()) + self.assertEqual( + 1, + channel.history.filter(actor=user, action=channel_history.RECOVERY).count(), + ) class UnpublishedChangesQueryTestCase(StudioAPITestCase): def test_unpublished_changes_query_with_channel_object(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 1) @@ -502,24 +655,42 @@ def test_unpublished_changes_query_with_channel_object(self): def test_unpublished_changes_query_with_channel_object_none_since_publish(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, 
{"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) - def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish(self): + def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish( + self, + ): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) models.Change.create_change( generate_update_event( - channel.id, - CHANNEL, - {"name": "new name 2"}, - channel_id=channel.id + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id ), created_by_id=user.id, unpublishable=True, @@ -528,22 +699,30 @@ def test_unpublished_changes_query_with_channel_object_no_publishable_since_publ queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) - def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_through_error(self): + def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_through_error( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree = None channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) - def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_because_incomplete(self): + def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_because_incomplete( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree.complete = False channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) @@ -551,108 +730,153 @@ def test_unpublished_changes_query_with_channel_object_no_publishable_since_publ def test_unpublished_changes_query_with_outerref(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - 
models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertTrue(channels[0].unpublished_changes) def test_unpublished_changes_query_with_outerref_none_since_publish(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) def test_unpublished_changes_query_with_outerref_no_publishable_since_publish(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) models.Change.create_change( generate_update_event( - channel.id, - CHANNEL, - {"name": "new name 2"}, - channel_id=channel.id + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id ), created_by_id=user.id, - unpublishable=True + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + unpublishable=True, ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + 
unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) - def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_through_error(self): + def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_through_error( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree = None channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) - def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_because_incomplete(self): + def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_because_incomplete( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree.complete = False channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) class ChannelLanguageTestCase(StudioAPITestCase): - def setUp(self): super(ChannelLanguageTestCase, self).setUp() self.channel = testdata.channel() - self.channel.language_id = 'en' + self.channel.language_id = "en" self.channel.save() self.channel_id = self.channel.id - self.node_id = '00000000000000000000000000000003' + self.node_id = "00000000000000000000000000000003" self.public_node = PublicContentNode.objects.create( id=uuid.UUID(self.node_id), - title='Video 1', + title="Video 1", content_id=uuid.uuid4(), channel_id=uuid.UUID(self.channel.id), - lang_id='en', + lang_id="en", ) def test_channel_language_exists_valid_channel(self): - ContentNode.objects.filter(node_id=self.public_node.id).update(language_id='en') + ContentNode.objects.filter(node_id=self.public_node.id).update(language_id="en") response = self._perform_action("channel-language-exists", self.channel.id) self.assertEqual(response.status_code, 200, response.content) self.assertTrue(response.json()["exists"]) def test_channel_language_doesnt_exists_valid_channel(self): - PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id='es') + PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id="es") response = self._perform_action("channel-language-exists", self.channel.id) self.assertEqual(response.status_code, 200, response.content) self.assertFalse(response.json()["exists"]) def test_channel_language_exists_invalid_channel(self): - response = self._perform_action("channel-language-exists", 'unknown_channel_id') + response = self._perform_action("channel-language-exists", "unknown_channel_id") self.assertEqual(response.status_code, 404, 
response.content) def test_channel_language_exists_invalid_request(self): @@ -661,11 +885,15 @@ def test_channel_language_exists_invalid_request(self): self.assertEqual(response.status_code, 404, response.content) def test_get_languages_in_channel_success_languages(self): - new_language = 'swa' + new_language = "swa" self.channel.language_id = new_language self.channel.save() - PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id=new_language) - ContentNode.objects.filter(node_id=self.public_node.id).update(language_id=new_language) + PublicContentNode.objects.filter(id=self.public_node.id).update( + lang_id=new_language + ) + ContentNode.objects.filter(node_id=self.public_node.id).update( + language_id=new_language + ) response = self._perform_action("channel-languages", self.channel.id) languages = response.json()["languages"] @@ -674,12 +902,16 @@ def test_get_languages_in_channel_success_languages(self): self.assertListEqual(languages, [new_language]) def test_get_languages_in_channel_success_channel_language_excluded(self): - new_language = 'fr' - channel_lang = 'en' + new_language = "fr" + channel_lang = "en" self.channel.language_id = channel_lang self.channel.save() - PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id=new_language) - ContentNode.objects.filter(node_id=self.public_node.id).update(language_id=new_language) + PublicContentNode.objects.filter(id=self.public_node.id).update( + lang_id=new_language + ) + ContentNode.objects.filter(node_id=self.public_node.id).update( + language_id=new_language + ) response = self._perform_action("channel-languages", self.channel.id) languages = response.json()["languages"] @@ -704,5 +936,7 @@ def test_get_languages_in_channel_invalid_request(self): def _perform_action(self, url_path, channel_id): user = testdata.user() self.client.force_authenticate(user=user) - response = self.client.get(reverse(url_path, kwargs={"pk": channel_id}), format="json") + response = self.client.get( + reverse(url_path, kwargs={"pk": channel_id}), format="json" + ) return response diff --git a/contentcuration/contentcuration/tests/viewsets/test_channelset.py b/contentcuration/contentcuration/tests/viewsets/test_channelset.py index 19ec846f11..a0f72f7a2d 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_channelset.py +++ b/contentcuration/contentcuration/tests/viewsets/test_channelset.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import uuid from django.urls import reverse @@ -15,7 +13,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def channelset_metadata(self): return { @@ -41,7 +38,11 @@ def test_create_channelset(self): self.client.force_authenticate(user=self.user) channelset = self.channelset_metadata response = self.sync_changes( - [generate_create_event(channelset["id"], CHANNELSET, channelset, user_id=self.user.id)], + [ + generate_create_event( + channelset["id"], CHANNELSET, channelset, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -55,8 +56,12 @@ def test_create_channelsets(self): channelset2 = self.channelset_metadata response = self.sync_changes( [ - generate_create_event(channelset1["id"], CHANNELSET, channelset1, user_id=self.user.id), - generate_create_event(channelset2["id"], CHANNELSET, channelset2, user_id=self.user.id), + generate_create_event( + channelset1["id"], CHANNELSET, channelset1, user_id=self.user.id + ), + generate_create_event( + channelset2["id"], CHANNELSET, channelset2, 
user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -77,7 +82,11 @@ def test_update_channelset(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( - [generate_update_event(channelset.id, CHANNELSET, {"channels": {}}, user_id=self.user.id)], + [ + generate_update_event( + channelset.id, CHANNELSET, {"channels": {}}, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertFalse( @@ -96,8 +105,12 @@ def test_update_channelsets(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( [ - generate_update_event(channelset1.id, CHANNELSET, {"channels": {}}, user_id=self.user.id), - generate_update_event(channelset2.id, CHANNELSET, {"channels": {}}, user_id=self.user.id), + generate_update_event( + channelset1.id, CHANNELSET, {"channels": {}}, user_id=self.user.id + ), + generate_update_event( + channelset2.id, CHANNELSET, {"channels": {}}, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -118,7 +131,11 @@ def test_update_channelset_empty(self): channelset.editors.add(self.user) self.client.force_authenticate(user=self.user) response = self.sync_changes( - [generate_update_event(channelset.id, CHANNELSET, {}, user_id=self.user.id)], + [ + generate_update_event( + channelset.id, CHANNELSET, {}, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) @@ -130,7 +147,10 @@ def test_update_channelset_unwriteable_fields(self): response = self.sync_changes( [ generate_update_event( - channelset.id, CHANNELSET, {"not_a_field": "not_a_value"}, user_id=self.user.id + channelset.id, + CHANNELSET, + {"not_a_field": "not_a_value"}, + user_id=self.user.id, ) ], ) @@ -152,7 +172,7 @@ def test_update_channelset_channels(self): channelset.id, CHANNELSET, {"channels.{}".format(channel1.id): True}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -172,7 +192,7 @@ def test_update_channelset_channels(self): channelset.id, CHANNELSET, {"channels.{}".format(channel2.id): True}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -194,7 +214,7 @@ def test_update_channelset_channels(self): channelset.id, CHANNELSET, {"channels.{}".format(channel2.id): None}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -225,7 +245,7 @@ def test_update_channelset_channels_no_permission(self): channelset.id, CHANNELSET, {"channels.{}".format(channel1.id): True}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -306,7 +326,9 @@ def test_create_channelset(self): self.client.force_authenticate(user=self.user) channelset = self.channelset_metadata response = self.client.post( - reverse("channelset-list"), channelset, format="json", + reverse("channelset-list"), + channelset, + format="json", ) self.assertEqual(response.status_code, 201, response.content) try: @@ -320,7 +342,9 @@ def test_create_channelset_no_channel_permission(self): channelset = self.channelset_metadata channelset["channels"] = {new_channel.id: True} response = self.client.post( - reverse("channelset-list"), channelset, format="json", + reverse("channelset-list"), + channelset, + format="json", ) self.assertEqual(response.status_code, 400, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_clipboard.py b/contentcuration/contentcuration/tests/viewsets/test_clipboard.py index 59113c0532..9f88dfd58d 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_clipboard.py +++ 
b/contentcuration/contentcuration/tests/viewsets/test_clipboard.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import uuid from django.core.management import call_command @@ -58,7 +56,11 @@ def test_create_clipboard(self): self.client.force_authenticate(user=self.user) clipboard = self.clipboard_metadata response = self.sync_changes( - [generate_create_event(clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id)], + [ + generate_create_event( + clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -71,7 +73,11 @@ def test_create_clipboard_with_null_extra_fields(self): clipboard = self.clipboard_metadata clipboard["extra_fields"] = None response = self.sync_changes( - [generate_create_event(clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id)], + [ + generate_create_event( + clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -86,7 +92,11 @@ def test_create_clipboard_with_parent(self): clipboard = self.clipboard_metadata clipboard["parent"] = channel.main_tree_id response = self.sync_changes( - [generate_create_event(clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id)], + [ + generate_create_event( + clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -102,8 +112,12 @@ def test_create_clipboards(self): clipboard2 = self.clipboard_metadata response = self.sync_changes( [ - generate_create_event(clipboard1["id"], CLIPBOARD, clipboard1, user_id=self.user.id), - generate_create_event(clipboard2["id"], CLIPBOARD, clipboard2, user_id=self.user.id), + generate_create_event( + clipboard1["id"], CLIPBOARD, clipboard1, user_id=self.user.id + ), + generate_create_event( + clipboard2["id"], CLIPBOARD, clipboard2, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -241,7 +255,9 @@ def test_create_clipboard(self): self.client.force_authenticate(user=self.user) clipboard = self.clipboard_metadata response = self.client.post( - reverse("clipboard-list"), clipboard, format="json", + reverse("clipboard-list"), + clipboard, + format="json", ) self.assertEqual(response.status_code, 405, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_contentnode.py b/contentcuration/contentcuration/tests/viewsets/test_contentnode.py index cf2a6fe3d5..d27b4304d9 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_contentnode.py +++ b/contentcuration/contentcuration/tests/viewsets/test_contentnode.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import uuid import mock @@ -17,7 +15,9 @@ from le_utils.constants import content_kinds from le_utils.constants import exercises from le_utils.constants import roles -from le_utils.constants.labels.accessibility_categories import ACCESSIBILITYCATEGORIESLIST +from le_utils.constants.labels.accessibility_categories import ( + ACCESSIBILITYCATEGORIESLIST, +) from le_utils.constants.labels.subjects import SUBJECTSLIST from contentcuration import models @@ -265,17 +265,27 @@ def assertQuerysetPKs(self, expected_qs, actual_qs): self.assertEqual(expected_pk, actual_pk) def test_filter_ancestors_of(self): - target = models.ContentNode.objects.get(node_id="00000000000000000000000000000003") - queryset = self.filter.filter_ancestors_of(models.ContentNode.objects.all(), None, target.pk) + target = 
models.ContentNode.objects.get( + node_id="00000000000000000000000000000003" + ) + queryset = self.filter.filter_ancestors_of( + models.ContentNode.objects.all(), None, target.pk + ) self.assertQuerysetPKs(target.get_ancestors(include_self=True), queryset) def test_filter_ancestors_of__root_node(self): - queryset = self.filter.filter_ancestors_of(models.ContentNode.objects.all(), None, self.root.pk) - self.assertQuerysetPKs(models.ContentNode.objects.filter(pk=self.root.pk), queryset) + queryset = self.filter.filter_ancestors_of( + models.ContentNode.objects.all(), None, self.root.pk + ) + self.assertQuerysetPKs( + models.ContentNode.objects.filter(pk=self.root.pk), queryset + ) def test_filter_ancestors_of__missing_target(self): - queryset = self.filter.filter_ancestors_of(models.ContentNode.objects.all(), None, "nonexistant ID") + queryset = self.filter.filter_ancestors_of( + models.ContentNode.objects.all(), None, "nonexistant ID" + ) self.assertQuerysetPKs(models.ContentNode.objects.none(), queryset) @@ -292,7 +302,8 @@ def test_get_contentnode__editor(self): self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["id"], contentnode.id) @@ -307,7 +318,8 @@ def test_get_contentnode__viewer(self): self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["id"], contentnode.id) @@ -321,7 +333,8 @@ def test_get_contentnode__no_permssion(self): self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 404, response.content) @@ -331,7 +344,8 @@ def test_get_contentnode__unauthenticated(self): with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 403, response.content) @@ -343,7 +357,8 @@ def test_public_get_contentnode__unauthenticated(self): with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 403, response.content) @@ -362,19 +377,38 @@ def test_consolidate_extra_fields(self): "m": 3, "n": 6, "mastery_model": exercises.M_OF_N, - } + }, ) self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["threshold"]["m"], 3) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["threshold"]["n"], 6) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["threshold"]["mastery_model"], exercises.M_OF_N) - 
self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["model"], completion_criteria.MASTERY) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"][ + "threshold" + ]["m"], + 3, + ) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"][ + "threshold" + ]["n"], + 6, + ) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"][ + "threshold" + ]["mastery_model"], + exercises.M_OF_N, + ) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"]["model"], + completion_criteria.MASTERY, + ) def test_consolidate_extra_fields_with_mastrey_model_none(self): @@ -389,24 +423,23 @@ def test_consolidate_extra_fields_with_mastrey_model_none(self): description="India is the hottest country in the world", parent_id=channel.main_tree_id, extra_fields={ - "m": None, "n": None, "mastery_model": None, - } + }, ) self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["extra_fields"], {}) class SyncTestCase(SyncTestMixin, StudioAPITestCase): - def setUp(self): super(SyncTestCase, self).setUp() self.channel = testdata.channel() @@ -438,7 +471,14 @@ def test_create_contentnode_no_permissions(self): self.channel.editors.remove(self.user) contentnode = self.contentnode_metadata response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(models.ContentNode.DoesNotExist): @@ -448,7 +488,14 @@ def test_create_contentnode_with_parent(self): self.channel.editors.add(self.user) contentnode = self.contentnode_metadata response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -466,7 +513,14 @@ def test_cannot_create_contentnode(self): contentnode["parent"] = self.channel.main_tree_id response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(len(response.data["disallowed"]), 1) try: @@ -480,8 +534,18 @@ def test_create_contentnodes(self): contentnode2 = self.contentnode_metadata response = self.sync_changes( [ - generate_create_event(contentnode1["id"], CONTENTNODE, contentnode1, channel_id=self.channel.id), - generate_create_event(contentnode2["id"], CONTENTNODE, contentnode2, channel_id=self.channel.id), + generate_create_event( + contentnode1["id"], + CONTENTNODE, + contentnode1, + channel_id=self.channel.id, + ), + generate_create_event( + contentnode2["id"], + CONTENTNODE, + contentnode2, + channel_id=self.channel.id, + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -509,10 +573,16 @@ def test_cannot_create_some_contentnodes(self): response = self.sync_changes( [ 
generate_create_event( - contentnode1["id"], CONTENTNODE, contentnode1, channel_id=channel1.id + contentnode1["id"], + CONTENTNODE, + contentnode1, + channel_id=channel1.id, ), generate_create_event( - contentnode2["id"], CONTENTNODE, contentnode2, channel_id=channel2.id + contentnode2["id"], + CONTENTNODE, + contentnode2, + channel_id=channel2.id, ), ], ) @@ -535,7 +605,14 @@ def test_update_contentnode(self): new_title = "This is not the old title" response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( @@ -547,7 +624,11 @@ def test_cannot_update_contentnode_parent(self): contentnode2 = models.ContentNode.objects.create(**self.contentnode_db_metadata) self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"parent": contentnode2.id})], + [ + generate_update_event( + contentnode.id, CONTENTNODE, {"parent": contentnode2.id} + ) + ], ) self.assertNotEqual( models.ContentNode.objects.get(id=contentnode.id).parent_id, contentnode2.id @@ -561,7 +642,10 @@ def test_cannot_update_no_permissions(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, ) ], ) @@ -579,7 +663,11 @@ def test_update_descendants_contentnode(self): new_language = "es" response = self.sync_changes( - [generate_update_descendants_event(root_node.id, {"language": new_language}, channel_id=self.channel.id)], + [ + generate_update_descendants_event( + root_node.id, {"language": new_language}, channel_id=self.channel.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) @@ -591,11 +679,19 @@ def test_update_descendants_contentnode(self): def test_cannot_update_descendants_when_updating_non_topic_node(self): root_node = testdata.tree() - video_node = root_node.get_descendants().filter(kind_id=content_kinds.VIDEO).first() + video_node = ( + root_node.get_descendants().filter(kind_id=content_kinds.VIDEO).first() + ) new_language = "pt" response = self.sync_changes( - [generate_update_descendants_event(video_node.id, {"language": new_language}, channel_id=self.channel.id)], + [ + generate_update_descendants_event( + video_node.id, + {"language": new_language}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(len(response.data["errors"]), 1) @@ -612,20 +708,33 @@ def test_update_contentnode_exercise_mastery_model(self): m = 5 n = 10 response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "extra_fields.options.completion_criteria.threshold.m": m, - "extra_fields.options.completion_criteria.threshold.n": n, - "extra_fields.options.completion_criteria.threshold.mastery_model": exercises.M_OF_N, - "extra_fields.options.completion_criteria.model": completion_criteria.MASTERY - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.threshold.m": m, + "extra_fields.options.completion_criteria.threshold.n": n, + "extra_fields.options.completion_criteria.threshold.mastery_model": exercises.M_OF_N, + "extra_fields.options.completion_criteria.model": completion_criteria.MASTERY, + }, + channel_id=self.channel.id, + ) + ], ) 
self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["m"], m + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["m"], + m, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["n"], n + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["n"], + n, ) def test_update_contentnode_exercise_mastery_model_partial(self): @@ -648,14 +757,24 @@ def test_update_contentnode_exercise_mastery_model_partial(self): # Update m and n fields m = 4 response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "extra_fields.options.completion_criteria.threshold.m": m, - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.threshold.m": m, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["m"], m + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["m"], + m, ) def test_update_contentnode_exercise_mastery_model_old(self): @@ -672,23 +791,42 @@ def test_update_contentnode_exercise_mastery_model_old(self): # Update m and n fields m = 4 response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "extra_fields.options.completion_criteria.threshold.m": m, - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.threshold.m": m, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["m"], m + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["m"], + m, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["n"], 10 + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["n"], + 10, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["mastery_model"], exercises.M_OF_N + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["mastery_model"], + exercises.M_OF_N, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["model"], completion_criteria.MASTERY + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["model"], + completion_criteria.MASTERY, ) def test_update_contentnode_exercise_incomplete_mastery_model_marked_complete(self): @@ -697,16 +835,23 @@ def test_update_contentnode_exercise_incomplete_mastery_model_marked_complete(se contentnode = models.ContentNode.objects.create(**metadata) response = self.sync_changes( - [generate_update_event(contentnode.id, 
CONTENTNODE, { - "complete": True, - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "complete": True, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertFalse( - models.ContentNode.objects.get(id=contentnode.id).complete - ) - change = models.Change.objects.filter(channel=self.channel, change_type=UPDATED, table=CONTENTNODE).last() + self.assertFalse(models.ContentNode.objects.get(id=contentnode.id).complete) + change = models.Change.objects.filter( + channel=self.channel, change_type=UPDATED, table=CONTENTNODE + ).last() self.assertFalse(change.kwargs["mods"]["complete"]) def test_update_contentnode_extra_fields(self): @@ -714,11 +859,19 @@ def test_update_contentnode_extra_fields(self): # Update extra_fields.randomize randomize = True response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.randomize": randomize}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"extra_fields.randomize": randomize}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["randomize"], randomize + models.ContentNode.objects.get(id=contentnode.id).extra_fields["randomize"], + randomize, ) def test_update_contentnode_add_to_extra_fields_nested(self): @@ -726,10 +879,22 @@ def test_update_contentnode_add_to_extra_fields_nested(self): contentnode = models.ContentNode.objects.create(**metadata) # Add extra_fields.options.modality response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.options.modality": "QUIZ"}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"extra_fields.options.modality": "QUIZ"}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["modality"], "QUIZ") + self.assertEqual( + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "modality" + ], + "QUIZ", + ) def test_update_contentnode_remove_from_extra_fields_nested(self): metadata = self.contentnode_db_metadata @@ -741,11 +906,20 @@ def test_update_contentnode_remove_from_extra_fields_nested(self): contentnode = models.ContentNode.objects.create(**metadata) # Remove extra_fields.options.modality response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.options.modality": None}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"extra_fields.options.modality": None}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(KeyError): - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["modality"] + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "modality" + ] def test_update_contentnode_update_options_completion_criteria(self): metadata = self.contentnode_db_metadata @@ -767,17 +941,22 @@ def test_update_contentnode_update_options_completion_criteria(self): CONTENTNODE, { "extra_fields.options.completion_criteria.model": completion_criteria.TIME, - "extra_fields.options.completion_criteria.threshold": 10 + 
"extra_fields.options.completion_criteria.threshold": 10, }, - channel_id=self.channel.id + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) c = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.TIME) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["threshold"], 10) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.TIME, + ) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["threshold"], 10 + ) def test_update_contentnode_update_options_completion_criteria_threshold_only(self): metadata = self.contentnode_db_metadata @@ -797,18 +976,21 @@ def test_update_contentnode_update_options_completion_criteria_threshold_only(se generate_update_event( contentnode.id, CONTENTNODE, - { - "extra_fields.options.completion_criteria.threshold": 10 - }, - channel_id=self.channel.id + {"extra_fields.options.completion_criteria.threshold": 10}, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) c = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.TIME) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["threshold"], 10) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.TIME, + ) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["threshold"], 10 + ) def test_update_completion_criteria_model_to_determined_by_resource_edge_case(self): metadata = self.contentnode_db_metadata @@ -818,38 +1000,44 @@ def test_update_completion_criteria_model_to_determined_by_resource_edge_case(se "completion_criteria": { "model": completion_criteria.REFERENCE, "threshold": None, - "learner_managed": False + "learner_managed": False, } } } contentnode = models.ContentNode.objects.create(**metadata) response = self.sync_changes( - [ - generate_update_event( - contentnode.id, - CONTENTNODE, - { - "complete": True, - "extra_fields.options.completion_criteria.threshold": 600, - "extra_fields.options.completion_criteria.model": completion_criteria.APPROX_TIME - }, - channel_id=self.channel.id - ), - generate_update_event( - contentnode.id, - CONTENTNODE, - { - "extra_fields.options.completion_criteria.model": completion_criteria.DETERMINED_BY_RESOURCE - }, - channel_id=self.channel.id - ) - ], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "complete": True, + "extra_fields.options.completion_criteria.threshold": 600, + "extra_fields.options.completion_criteria.model": completion_criteria.APPROX_TIME, + }, + channel_id=self.channel.id, + ), + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.model": completion_criteria.DETERMINED_BY_RESOURCE + }, + channel_id=self.channel.id, + ), + ], ) self.assertEqual(len(response.data["errors"]), 0) updated_contentnode = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(updated_contentnode.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.DETERMINED_BY_RESOURCE) - self.assertNotIn("threshold", updated_contentnode.extra_fields["options"]["completion_criteria"]) + self.assertEqual( + updated_contentnode.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.DETERMINED_BY_RESOURCE, + ) + 
self.assertNotIn( + "threshold", + updated_contentnode.extra_fields["options"]["completion_criteria"], + ) def test_update_contentnode_update_options_invalid_completion_criteria(self): metadata = self.contentnode_db_metadata @@ -874,15 +1062,20 @@ def test_update_contentnode_update_options_invalid_completion_criteria(self): "complete": True, "extra_fields.options.completion_criteria.model": completion_criteria.TIME, }, - channel_id=self.channel.id + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) c = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.REFERENCE) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["threshold"], None) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.REFERENCE, + ) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["threshold"], None + ) def test_update_contentnode_add_multiple_metadata_labels(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) @@ -892,44 +1085,90 @@ def test_update_contentnode_add_multiple_metadata_labels(self): generate_update_event( contentnode.id, CONTENTNODE, - {"accessibility_labels.{}".format(ACCESSIBILITYCATEGORIESLIST[0]): True}, - channel_id=self.channel.id + { + "accessibility_labels.{}".format( + ACCESSIBILITYCATEGORIESLIST[0] + ): True + }, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[0]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[0] + ] + ) response = self.sync_changes( [ generate_update_event( contentnode.id, CONTENTNODE, - {"accessibility_labels.{}".format(ACCESSIBILITYCATEGORIESLIST[1]): True}, - channel_id=self.channel.id + { + "accessibility_labels.{}".format( + ACCESSIBILITYCATEGORIESLIST[1] + ): True + }, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[0]]) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[1]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[0] + ] + ) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[1] + ] + ) def test_update_contentnode_add_multiple_nested_metadata_labels(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) # Add metadata label to categories response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"categories.{}".format(nested_subjects[0]): True}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"categories.{}".format(nested_subjects[0]): True}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[0]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[0] + ] + ) response = self.sync_changes( - 
[generate_update_event(contentnode.id, CONTENTNODE, {"categories.{}".format(nested_subjects[1]): True}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"categories.{}".format(nested_subjects[1]): True}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[0]]) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[1]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[0] + ] + ) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[1] + ] + ) def test_update_contentnode_remove_metadata_label(self): metadata = self.contentnode_db_metadata @@ -942,14 +1181,20 @@ def test_update_contentnode_remove_metadata_label(self): generate_update_event( contentnode.id, CONTENTNODE, - {"accessibility_labels.{}".format(ACCESSIBILITYCATEGORIESLIST[0]): None}, - channel_id=self.channel.id + { + "accessibility_labels.{}".format( + ACCESSIBILITYCATEGORIESLIST[0] + ): None + }, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(KeyError): - models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[0]] + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[0] + ] def test_update_contentnode_remove_nested_metadata_label(self): metadata = self.contentnode_db_metadata @@ -958,11 +1203,20 @@ def test_update_contentnode_remove_nested_metadata_label(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) # Add metadata label to categories response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"categories.{}".format(nested_subjects[0]): None}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"categories.{}".format(nested_subjects[0]): None}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(KeyError): - models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[0]] + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[0] + ] def test_update_contentnode_tags(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) @@ -971,7 +1225,10 @@ def test_update_contentnode_tags(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(tag): True}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(tag): True}, + channel_id=self.channel.id, ) ], ) @@ -987,7 +1244,10 @@ def test_update_contentnode_tags(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(other_tag): True}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(other_tag): True}, + channel_id=self.channel.id, ) ], ) @@ -1006,7 +1266,10 @@ def test_update_contentnode_tags(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(other_tag): None}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(other_tag): None}, + channel_id=self.channel.id, ) ], ) @@ -1030,7 +1293,10 @@ def 
test_update_contentnode_tag_greater_than_30_chars(self):
         response = self.sync_changes(
             [
                 generate_update_event(
-                    contentnode.id, CONTENTNODE, {"tags.{}".format(tag): True}, channel_id=self.channel.id
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"tags.{}".format(tag): True},
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1046,22 +1312,39 @@ def test_update_contentnode_suggested_duration(self):
         new_suggested_duration = 600
         response = self.sync_changes(
-            [generate_update_event(contentnode.id, CONTENTNODE, {"suggested_duration": new_suggested_duration}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"suggested_duration": new_suggested_duration},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(
-            models.ContentNode.objects.get(id=contentnode.id).suggested_duration, new_suggested_duration
+            models.ContentNode.objects.get(id=contentnode.id).suggested_duration,
+            new_suggested_duration,
         )
 
     def test_update_contentnode_extra_fields_inherited_metadata(self):
         contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata)
         response = self.sync_changes(
-            [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.inherited_metadata.categories": True}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"extra_fields.inherited_metadata.categories": True},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertTrue(
-            models.ContentNode.objects.get(id=contentnode.id).extra_fields["inherited_metadata"]["categories"]
+            models.ContentNode.objects.get(id=contentnode.id).extra_fields[
+                "inherited_metadata"
+            ]["categories"]
         )
 
     def test_update_contentnode_tags_dont_duplicate(self):
@@ -1073,7 +1356,10 @@ def test_update_contentnode_tags_dont_duplicate(self):
         response = self.sync_changes(
             [
                 generate_update_event(
-                    contentnode.id, CONTENTNODE, {"tags.{}".format(tag): True}, channel_id=self.channel.id
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"tags.{}".format(tag): True},
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1089,7 +1375,14 @@ def test_update_contentnode_tags_list(self):
         tag = "howzat!"
         response = self.sync_changes(
-            [generate_update_event(contentnode.id, CONTENTNODE, {"tags": [tag]}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"tags": [tag]},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(len(response.data["errors"]), 1)
@@ -1101,10 +1394,16 @@ def test_update_contentnodes(self):
         response = self.sync_changes(
             [
                 generate_update_event(
-                    contentnode1.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id
+                    contentnode1.id,
+                    CONTENTNODE,
+                    {"title": new_title},
+                    channel_id=self.channel.id,
                 ),
                 generate_update_event(
-                    contentnode2.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id
+                    contentnode2.id,
+                    CONTENTNODE,
+                    {"title": new_title},
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1129,10 +1428,16 @@ def test_cannot_update_some_contentnodes(self):
         response = self.sync_changes(
             [
                 generate_update_event(
-                    contentnode1.id, CONTENTNODE, {"title": new_title}, channel_id=channel1.id
+                    contentnode1.id,
+                    CONTENTNODE,
+                    {"title": new_title},
+                    channel_id=channel1.id,
                 ),
                 generate_update_event(
-                    contentnode2.id, CONTENTNODE, {"title": new_title}, channel_id=channel2.id
+                    contentnode2.id,
+                    CONTENTNODE,
+                    {"title": new_title},
+                    channel_id=channel2.id,
                 ),
             ],
         )
@@ -1152,7 +1457,14 @@ def test_update_contentnode_updates_last_modified(self):
         new_title = "This is not the old title"
         response = self.sync_changes(
-            [generate_update_event(contentnode.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"title": new_title},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         updated_node = models.ContentNode.objects.get(id=contentnode.id)
@@ -1162,7 +1474,11 @@ def test_delete_contentnode(self):
         contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata)
 
         response = self.sync_changes(
-            [generate_delete_event(contentnode.id, CONTENTNODE, channel_id=self.channel.id)],
+            [
+                generate_delete_event(
+                    contentnode.id, CONTENTNODE, channel_id=self.channel.id
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -1176,7 +1492,11 @@ def test_cannot_delete_contentnode_no_permissions(self):
         contentnode = create_and_get_contentnode(self.channel.main_tree_id)
 
         response = self.sync_changes(
-            [generate_delete_event(contentnode.id, CONTENTNODE, channel_id=self.channel.id)],
+            [
+                generate_delete_event(
+                    contentnode.id, CONTENTNODE, channel_id=self.channel.id
+                )
+            ],
         )
         # Return a 200 here rather than a 404.
         self.assertEqual(response.status_code, 200, response.content)
@@ -1191,8 +1511,12 @@ def test_delete_contentnodes(self):
         self.sync_changes(
             [
-                generate_delete_event(contentnode1.id, CONTENTNODE, channel_id=self.channel.id),
-                generate_delete_event(contentnode2.id, CONTENTNODE, channel_id=self.channel.id),
+                generate_delete_event(
+                    contentnode1.id, CONTENTNODE, channel_id=self.channel.id
+                ),
+                generate_delete_event(
+                    contentnode2.id, CONTENTNODE, channel_id=self.channel.id
+                ),
             ],
         )
         try:
@@ -1218,8 +1542,12 @@ def test_cannot_delete_some_contentnodes(self):
         response = self.sync_changes(
             [
-                generate_delete_event(contentnode1.id, CONTENTNODE, channel_id=channel1.id),
-                generate_delete_event(contentnode2.id, CONTENTNODE, channel_id=channel2.id),
+                generate_delete_event(
+                    contentnode1.id, CONTENTNODE, channel_id=channel1.id
+                ),
+                generate_delete_event(
+                    contentnode2.id, CONTENTNODE, channel_id=channel2.id
+                ),
             ],
         )
         self.assertEqual(len(response.data["disallowed"]), 1)
@@ -1241,7 +1569,11 @@ def test_copy_contentnode(self):
         response = self.sync_changes(
             [
                 generate_copy_event(
-                    new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id
+                    new_node_id,
+                    CONTENTNODE,
+                    contentnode.id,
+                    self.channel.main_tree_id,
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1261,7 +1593,11 @@ def test_copy_contentnode_finalization_does_not_make_publishable(self):
         response = self.sync_changes(
             [
                 generate_copy_event(
-                    new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id
+                    new_node_id,
+                    CONTENTNODE,
+                    contentnode.id,
+                    self.channel.main_tree_id,
+                    channel_id=self.channel.id,
                 ),
                 # Save a published change for the channel, so that the finalization change will be generated
                 # after the publish change, and we can check that it is properly not making the channel appear publishable.
@@ -1280,7 +1616,11 @@ def test_cannot_copy_contentnode__source_permission(self):
         response = self.sync_changes(
             [
                 generate_copy_event(
-                    new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id
+                    new_node_id,
+                    CONTENTNODE,
+                    contentnode.id,
+                    self.channel.main_tree_id,
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1300,7 +1640,11 @@ def test_cannot_copy_contentnode__target_permission(self):
         response = self.sync_changes(
             [
                 generate_copy_event(
-                    new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id
+                    new_node_id,
+                    CONTENTNODE,
+                    contentnode.id,
+                    self.channel.main_tree_id,
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1319,7 +1663,14 @@ def test_create_contentnode_moveable(self):
         """
         contentnode = self.contentnode_metadata
         response = self.sync_changes(
-            [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)],
+            [
+                generate_create_event(
+                    contentnode["id"],
+                    CONTENTNODE,
+                    contentnode,
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -1350,7 +1701,11 @@ def test_copy_contentnode_moveable(self):
         response = self.sync_changes(
             [
                 generate_copy_event(
-                    new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id
+                    new_node_id,
+                    CONTENTNODE,
+                    contentnode.id,
+                    self.channel.main_tree_id,
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1394,7 +1749,11 @@ def test_delete_orphanage_root(self):
         models.ContentNode.objects.create(**self.contentnode_db_metadata)
 
         response = self.sync_changes(
-            [generate_delete_event(settings.ORPHANAGE_ROOT_ID, CONTENTNODE, channel_id=self.channel.id)],
+            [
+                generate_delete_event(
+                    settings.ORPHANAGE_ROOT_ID, CONTENTNODE, channel_id=self.channel.id
+                )
+            ],
         )
         # We return 200 even when a deletion is not found, but it should
         # still not actually delete it.
@@ -1411,10 +1770,16 @@ def test_create_prerequisites(self):
         response = self.sync_changes(
             [
                 generate_create_event(
-                    [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id
+                    [contentnode.id, prereq.id],
+                    CONTENTNODE_PREREQUISITE,
+                    {},
+                    channel_id=self.channel.id,
                 ),
                 generate_create_event(
-                    [postreq.id, contentnode.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id
+                    [postreq.id, contentnode.id],
+                    CONTENTNODE_PREREQUISITE,
+                    {},
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1427,7 +1792,10 @@ def test_create_self_referential_prerequisite(self):
         response = self.sync_changes(
             [
                 generate_create_event(
-                    [contentnode.id, contentnode.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id
+                    [contentnode.id, contentnode.id],
+                    CONTENTNODE_PREREQUISITE,
+                    {},
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1443,7 +1811,10 @@ def test_create_cyclic_prerequisite(self):
         response = self.sync_changes(
             [
                 generate_create_event(
-                    [prereq.id, contentnode.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id
+                    [prereq.id, contentnode.id],
+                    CONTENTNODE_PREREQUISITE,
+                    {},
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1458,7 +1829,10 @@ def test_create_cross_tree_prerequisite(self):
         response = self.sync_changes(
             [
                 generate_create_event(
-                    [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id
+                    [contentnode.id, prereq.id],
+                    CONTENTNODE_PREREQUISITE,
+                    {},
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1472,7 +1846,10 @@ def test_create_no_permission_prerequisite(self):
         response = self.sync_changes(
             [
                 generate_create_event(
-                    [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id
+                    [contentnode.id, prereq.id],
+                    CONTENTNODE_PREREQUISITE,
+                    {},
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1492,10 +1869,14 @@ def test_delete_prerequisites(self):
         response = self.sync_changes(
             [
                 generate_delete_event(
-                    [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, channel_id=self.channel.id
+                    [contentnode.id, prereq.id],
+                    CONTENTNODE_PREREQUISITE,
+                    channel_id=self.channel.id,
                 ),
                 generate_delete_event(
-                    [postreq.id, contentnode.id], CONTENTNODE_PREREQUISITE, channel_id=self.channel.id
+                    [postreq.id, contentnode.id],
+                    CONTENTNODE_PREREQUISITE,
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1513,7 +1894,9 @@ def test_delete_no_permission_prerequisite(self):
         response = self.sync_changes(
             [
                 generate_delete_event(
-                    [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, channel_id=self.channel.id
+                    [contentnode.id, prereq.id],
+                    CONTENTNODE_PREREQUISITE,
+                    channel_id=self.channel.id,
                 ),
             ],
         )
@@ -1522,7 +1905,6 @@ def test_delete_no_permission_prerequisite(self):
 
 
 class CRUDTestCase(StudioAPITestCase):
-
     def setUp(self):
         super(CRUDTestCase, self).setUp()
         self.channel = testdata.channel()
@@ -1553,14 +1935,17 @@ def contentnode_db_metadata(self):
     def test_fetch_contentnode(self):
         contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata)
         response = self.client.get(
-            reverse("contentnode-detail", kwargs={"pk": contentnode.id}), format="json",
+            reverse("contentnode-detail", kwargs={"pk": contentnode.id}),
+            format="json",
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(response.data["id"], contentnode.id)
 
     def test_fetch_contentnode__by_parent(self):
-        channel = models.Channel.objects.create(actor_id=self.user.id, name="Test channel")
+        channel = models.Channel.objects.create(
+            actor_id=self.user.id, name="Test channel"
+        )
         channel.editors.add(self.user)
         channel.save()
@@ -1569,14 +1954,18 @@ def test_fetch_contentnode__by_parent(self):
         contentnode = models.ContentNode.objects.create(**metadata)
 
         response = self.client.get(
-            reverse("contentnode-list"), format="json", data={"parent": channel.main_tree_id},
+            reverse("contentnode-list"),
+            format="json",
+            data={"parent": channel.main_tree_id},
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(len(response.data), 1)
         self.assertEqual(response.data[0]["id"], contentnode.id)
 
     def test_fetch_contentnode__by_node_id_channel_id(self):
-        channel = models.Channel.objects.create(actor_id=self.user.id, name="Test channel")
+        channel = models.Channel.objects.create(
+            actor_id=self.user.id, name="Test channel"
+        )
         channel.editors.add(self.user)
         channel.save()
@@ -1634,7 +2023,9 @@ def test_fetch_requisites(self):
     def test_create_contentnode(self):
         contentnode = self.contentnode_metadata
         response = self.client.post(
-            reverse("contentnode-list"), contentnode, format="json",
+            reverse("contentnode-list"),
+            contentnode,
+            format="json",
         )
         self.assertEqual(response.status_code, 405, response.content)
@@ -1675,7 +2066,7 @@ def test_resource_size(self):
 
         total_size = sum(files_map.values())
 
-        self.assertEqual(response.data.get('size', 0), total_size)
+        self.assertEqual(response.data.get("size", 0), total_size)
 
 
 class AnnotationsTest(StudioAPITestCase):
diff --git a/contentcuration/contentcuration/tests/viewsets/test_file.py b/contentcuration/contentcuration/tests/viewsets/test_file.py
index 2a4e4e2376..9737c7f4bd 100644
--- a/contentcuration/contentcuration/tests/viewsets/test_file.py
+++ b/contentcuration/contentcuration/tests/viewsets/test_file.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import
-
 import uuid
 
 from django.urls import reverse
@@ -19,7 +17,6 @@
 class SyncTestCase(SyncTestMixin, StudioAPITestCase):
-
     @property
     def file_metadata(self):
         return {
@@ -66,8 +63,12 @@ def test_cannot_create_files(self):
         file2 = self.file_metadata
         response = self.sync_changes(
             [
-                generate_create_event(file1["id"], FILE, file1, channel_id=self.channel.id),
-                generate_create_event(file2["id"], FILE, file2, channel_id=self.channel.id),
+                generate_create_event(
+                    file1["id"], FILE, file1, channel_id=self.channel.id
+                ),
+                generate_create_event(
+                    file2["id"], FILE, file2, channel_id=self.channel.id
+                ),
             ],
         )
         self.assertEqual(len(response.data["errors"]), 2)
@@ -89,11 +90,16 @@ def test_update_file(self):
         new_preset = format_presets.VIDEO_HIGH_RES
         response = self.sync_changes(
-            [generate_update_event(file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(
-            models.File.objects.get(id=file.id).preset_id, new_preset,
+            models.File.objects.get(id=file.id).preset_id,
+            new_preset,
         )
 
     def test_update_file_no_channel(self):
@@ -102,11 +108,19 @@ def test_update_file_no_channel(self):
         file = models.File.objects.create(**file_metadata)
 
         response = self.sync_changes(
-            [generate_update_event(file.id, FILE, {"contentnode": contentnode_id}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id,
+                    FILE,
+                    {"contentnode": contentnode_id},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(
-            models.File.objects.get(id=file.id).contentnode_id, contentnode_id,
+            models.File.objects.get(id=file.id).contentnode_id,
+            contentnode_id,
         )
 
     def test_update_file_with_complete_contentnode(self):
@@ -120,7 +134,7 @@ def test_update_file_with_complete_contentnode(self):
             parent=self.channel.main_tree,
             license_id=models.License.objects.first().id,
             license_description="don't do this!",
-            copyright_holder="Some person"
+            copyright_holder="Some person",
         )
         errors = complete_except_no_file.mark_complete()
         complete_except_no_file.save()
@@ -130,15 +144,19 @@ def test_update_file_with_complete_contentnode(self):
         self.assertEqual(complete_except_no_file.complete, False)
 
         self.sync_changes(
-            [generate_update_event(file.id, FILE, {"contentnode": complete_except_no_file.id}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id,
+                    FILE,
+                    {"contentnode": complete_except_no_file.id},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         # We should see two Changes, one of them should be for the CONTENTNODE table
         self.assertEqual(models.Change.objects.count(), 2)
-        self.assertEqual(
-            models.Change.objects.filter(table=CONTENTNODE).count(),
-            1
-        )
+        self.assertEqual(models.Change.objects.filter(table=CONTENTNODE).count(), 1)
 
         complete_except_no_file.refresh_from_db()
@@ -151,11 +169,16 @@ def test_update_file_no_channel_permission(self):
         self.channel.editors.remove(self.user)
 
         response = self.sync_changes(
-            [generate_update_event(file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id
+                )
+            ],
         )
         self.assertEqual(len(response.data["disallowed"]), 1)
         self.assertNotEqual(
-            models.File.objects.get(id=file.id).preset_id, new_preset,
+            models.File.objects.get(id=file.id).preset_id,
+            new_preset,
         )
 
     def test_update_file_no_channel_edit_permission(self):
@@ -166,11 +189,16 @@ def test_update_file_no_channel_edit_permission(self):
         self.channel.viewers.add(self.user)
 
         response = self.sync_changes(
-            [generate_update_event(file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id
+                )
+            ],
         )
         self.assertEqual(len(response.data["disallowed"]), 1)
         self.assertNotEqual(
-            models.File.objects.get(id=file.id).preset_id, new_preset,
+            models.File.objects.get(id=file.id).preset_id,
+            new_preset,
         )
 
     def test_update_file_no_node_permission(self):
@@ -179,10 +207,18 @@ def test_update_file_no_node_permission(self):
         new_channel_node = new_channel.main_tree.get_descendants().first().id
 
         self.sync_changes(
-            [generate_update_event(file.id, FILE, {"contentnode": new_channel_node}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id,
+                    FILE,
+                    {"contentnode": new_channel_node},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertNotEqual(
-            models.File.objects.get(id=file.id).contentnode, new_channel_node,
+            models.File.objects.get(id=file.id).contentnode,
+            new_channel_node,
         )
 
     def test_update_file_no_assessmentitem_permission(self):
@@ -196,10 +232,18 @@ def test_update_file_no_assessmentitem_permission(self):
         new_channel_assessmentitem = new_channel_exercise.assessment_items.first().id
 
         self.sync_changes(
-            [generate_update_event(file.id, FILE, {"assessment_item": new_channel_assessmentitem}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id,
+                    FILE,
+                    {"assessment_item": new_channel_assessmentitem},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertNotEqual(
-            models.File.objects.get(id=file.id).assessment_item, new_channel_assessmentitem,
+            models.File.objects.get(id=file.id).assessment_item,
+            new_channel_assessmentitem,
         )
 
     def test_update_files(self):
@@ -210,29 +254,44 @@ def test_update_files(self):
         response = self.sync_changes(
             [
-                generate_update_event(file1.id, FILE, {"preset": new_preset}, channel_id=self.channel.id),
-                generate_update_event(file2.id, FILE, {"preset": new_preset}, channel_id=self.channel.id),
+                generate_update_event(
+                    file1.id, FILE, {"preset": new_preset}, channel_id=self.channel.id
+                ),
+                generate_update_event(
+                    file2.id, FILE, {"preset": new_preset}, channel_id=self.channel.id
+                ),
             ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(
-            models.File.objects.get(id=file1.id).preset_id, new_preset,
+            models.File.objects.get(id=file1.id).preset_id,
+            new_preset,
         )
         self.assertEqual(
-            models.File.objects.get(id=file2.id).preset_id, new_preset,
+            models.File.objects.get(id=file2.id).preset_id,
+            new_preset,
         )
 
     def test_update_file_empty(self):
         file = models.File.objects.create(**self.file_db_metadata)
-        response = self.sync_changes([generate_update_event(file.id, FILE, {}, channel_id=self.channel.id)])
+        response = self.sync_changes(
+            [generate_update_event(file.id, FILE, {}, channel_id=self.channel.id)]
+        )
         self.assertEqual(response.status_code, 200, response.content)
 
     def test_update_file_unwriteable_fields(self):
         file = models.File.objects.create(**self.file_db_metadata)
         response = self.sync_changes(
-            [generate_update_event(file.id, FILE, {"not_a_field": "not_a_value"}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    file.id,
+                    FILE,
+                    {"not_a_field": "not_a_value"},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
@@ -241,7 +300,9 @@ def test_delete_file(self):
         file = models.File.objects.create(**self.file_db_metadata)
 
         self.client.force_authenticate(user=self.user)
-        response = self.sync_changes([generate_delete_event(file.id, FILE, channel_id=self.channel.id)])
+        response = self.sync_changes(
+            [generate_delete_event(file.id, FILE, channel_id=self.channel.id)]
+        )
         self.assertEqual(response.status_code, 200, response.content)
         try:
             models.File.objects.get(id=file.id)
@@ -307,7 +368,11 @@ def setUp(self):
     def test_cannot_create_file(self):
         self.client.force_authenticate(user=self.user)
         file = self.file_metadata
-        response = self.client.post(reverse("file-list"), file, format="json",)
+        response = self.client.post(
+            reverse("file-list"),
+            file,
+            format="json",
+        )
         self.assertEqual(response.status_code, 405, response.content)
         try:
             models.File.objects.get(id=file["id"])
@@ -345,25 +410,94 @@ def setUp(self):
             "name": "le_studio",
             "file_format": file_formats.MP3,
             "preset": format_presets.AUDIO,
-            "duration": 10.123
+            "duration": 10.123,
         }
 
     def test_required_keys(self):
         del self.file["name"]
 
+        self.client.force_authenticate(user=self.user)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 400)
+
+    def test_duration_invalid(self):
+        self.file["duration"] = "1.23"
+
         self.client.force_authenticate(user=self.user)
         response = self.client.post(
-            reverse("file-upload-url"), self.file, format="json",
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
         )
         self.assertEqual(response.status_code, 400)
 
-    def test_duration_invalid(self):
-        self.file["duration"] = '1.23'
+    def test_duration_missing(self):
+        del self.file["duration"]
+        self.file["file_format"] = file_formats.EPUB
+        self.file["preset"] = format_presets.EPUB
+
+        self.client.force_authenticate(user=self.user)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
+
+        self.assertEqual(response.status_code, 200)
+
+    def test_duration_missing_but_required(self):
+        del self.file["duration"]
+        self.file["file_format"] = file_formats.MP4
+
+        self.client.force_authenticate(user=self.user)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
+
+        self.assertEqual(response.status_code, 400)
+
+    def test_duration_present_but_not_allowed(self):
+        self.file["file_format"] = file_formats.EPUB
+        self.file["preset"] = format_presets.DOCUMENT
+
+        self.client.force_authenticate(user=self.user)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
+
+        self.assertEqual(response.status_code, 400)
+
+    def test_duration_null(self):
+        self.file["duration"] = None
+        self.file["file_format"] = file_formats.EPUB
+        self.file["preset"] = format_presets.EPUB
 
         self.client.force_authenticate(user=self.user)
         response = self.client.post(
-            reverse("file-upload-url"), self.file, format="json",
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
+
+        self.assertEqual(response.status_code, 200)
+
+    def test_duration_null_but_required(self):
+        self.file["duration"] = None
+        self.file["file_format"] = file_formats.MP4
+
+        self.client.force_authenticate(user=self.user)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
         )
         self.assertEqual(response.status_code, 400)
@@ -376,10 +510,36 @@ def test_invalid_file_format_upload(self):
             "name": "le_studio",
             "file_format": "ppx",
             "preset": format_presets.AUDIO,
-            "duration": 10.123
+            "duration": 10.123,
         }
         response = self.client.post(
-            reverse("file-upload-url"), file, format="json",
+            reverse("file-upload-url"),
+            file,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 400)
+
+    def test_invalid_preset_upload(self):
+        self.client.force_authenticate(user=self.user)
+        file = {
+            "size": 1000,
+            "checksum": uuid.uuid4().hex,
+            "name": "le_studio",
+            "file_format": file_formats.MP3,
+            "preset": "invalid_preset",  # Deliberately invalid
+            "duration": 10.123,
+        }
+        response = self.client.post(reverse("file-upload-url"), file, format="json")
+        self.assertEqual(response.status_code, 400)
+
+    def test_mismatched_preset_upload(self):
+        self.file["file_format"] = file_formats.EPUB
+
+        self.client.force_authenticate(user=self.user)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
         )
         self.assertEqual(response.status_code, 400)
@@ -388,20 +548,32 @@ def test_insufficient_storage(self):
         self.file["size"] = 100000000000000
 
         self.client.force_authenticate(user=self.user)
-        response = self.client.post(reverse("file-upload-url"), self.file, format="json",)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
 
         self.assertEqual(response.status_code, 412)
 
     def test_upload_url(self):
         self.client.force_authenticate(user=self.user)
-        response = self.client.post(reverse("file-upload-url"), self.file, format="json",)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
 
         self.assertEqual(response.status_code, 200)
         file = models.File.objects.get(checksum=self.file["checksum"])
         self.assertEqual(10, file.duration)
 
     def test_upload_url_doesnot_sets_contentnode(self):
         self.client.force_authenticate(user=self.user)
-        response = self.client.post(reverse("file-upload-url"), self.file, format="json",)
+        response = self.client.post(
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
+        )
 
         file = models.File.objects.get(checksum=self.file["checksum"])
         self.assertEqual(response.status_code, 200)
         self.assertEqual(file.contentnode, None)
@@ -411,7 +583,9 @@ def test_duration_zero(self):
 
         self.client.force_authenticate(user=self.user)
         response = self.client.post(
-            reverse("file-upload-url"), self.file, format="json",
+            reverse("file-upload-url"),
+            self.file,
+            format="json",
         )
         self.assertEqual(response.status_code, 400)
@@ -432,6 +606,7 @@ def _get_file_metadata(self):
             "name": "le_studio_file",
             "file_format": file_formats.MP3,
             "preset": format_presets.AUDIO,
+            "duration": 17,
         }
 
     def _upload_file_to_contentnode(self, file_metadata=None, contentnode_id=None):
@@ -442,23 +617,34 @@ def _upload_file_to_contentnode(self, file_metadata=None, contentnode_id=None):
         to point to the contentnode.
         """
         file = file_metadata or self._get_file_metadata()
-        self.client.post(reverse("file-upload-url"), file, format="json",)
+        self.client.post(
+            reverse("file-upload-url"),
+            file,
+            format="json",
+        )
         file_from_db = models.File.objects.get(checksum=file["checksum"])
         self.sync_changes(
-            [generate_update_event(
-                file_from_db.id,
-                FILE,
-                {
-                    "contentnode": contentnode_id or self.channel.main_tree.get_descendants().first().id
-                },
-                channel_id=self.channel.id)],)
+            [
+                generate_update_event(
+                    file_from_db.id,
+                    FILE,
+                    {
+                        "contentnode": contentnode_id
+                        or self.channel.main_tree.get_descendants().first().id
+                    },
+                    channel_id=self.channel.id,
+                )
+            ],
+        )
         file_from_db.refresh_from_db()
         return file_from_db
 
     def _delete_file_from_contentnode(self, file_from_db):
         self.sync_changes(
             [
-                generate_delete_event(file_from_db.id, FILE, channel_id=self.channel.id),
+                generate_delete_event(
+                    file_from_db.id, FILE, channel_id=self.channel.id
+                ),
             ],
         )
@@ -481,19 +667,25 @@ def test_content_id__changes_on_upload_file_to_node(self):
         # Assert after new file upload, content_id changes.
         file.contentnode.refresh_from_db()
         file_contentnode_copy.refresh_from_db()
-        self.assertNotEqual(file.contentnode.content_id, file_contentnode_copy.content_id)
+        self.assertNotEqual(
+            file.contentnode.content_id, file_contentnode_copy.content_id
+        )
 
     def test_content_id__changes_on_delete_file_from_node(self):
         file = self._upload_file_to_contentnode()
         file_contentnode_copy = file.contentnode.copy_to(target=self.channel.main_tree)
 
         # Delete file from the copied contentnode.
-        self._delete_file_from_contentnode(file_from_db=file_contentnode_copy.files.first())
+        self._delete_file_from_contentnode(
+            file_from_db=file_contentnode_copy.files.first()
+        )
 
         # Assert after deleting file, content_id changes.
         file.contentnode.refresh_from_db()
         file_contentnode_copy.refresh_from_db()
-        self.assertNotEqual(file.contentnode.content_id, file_contentnode_copy.content_id)
+        self.assertNotEqual(
+            file.contentnode.content_id, file_contentnode_copy.content_id
+        )
 
     def test_content_id__doesnot_changes_on_update_original_file_node(self):
         file = self._upload_file_to_contentnode()
@@ -532,15 +724,31 @@ def test_content_id__thumbnails_dont_update_content_id(self):
         thumbnail_file_meta_1 = self._get_file_metadata()
         thumbnail_file_meta_2 = self._get_file_metadata()
-        thumbnail_file_meta_1.update({"preset": format_presets.AUDIO_THUMBNAIL, "file_format": file_formats.JPEG, })
-        thumbnail_file_meta_2.update({"preset": format_presets.AUDIO_THUMBNAIL, "file_format": file_formats.JPEG, })
+        thumbnail_file_meta_1.update(
+            {
+                "preset": format_presets.AUDIO_THUMBNAIL,
+                "file_format": file_formats.JPEG,
+            }
+        )
+        del thumbnail_file_meta_1["duration"]
+        thumbnail_file_meta_2.update(
+            {
+                "preset": format_presets.AUDIO_THUMBNAIL,
+                "file_format": file_formats.JPEG,
+            }
+        )
+        del thumbnail_file_meta_2["duration"]
 
         # Upload thumbnail to original contentnode and copied contentnode.
         # content_id should remain same for both these nodes.
         original_node_content_id_before_upload = file.contentnode.content_id
         copied_node_content_id_before_upload = file_contentnode_copy.content_id
-        self._upload_file_to_contentnode(file_metadata=thumbnail_file_meta_1, contentnode_id=file.contentnode.id)
-        self._upload_file_to_contentnode(file_metadata=thumbnail_file_meta_2, contentnode_id=file_contentnode_copy.id)
+        self._upload_file_to_contentnode(
+            file_metadata=thumbnail_file_meta_1, contentnode_id=file.contentnode.id
+        )
+        self._upload_file_to_contentnode(
+            file_metadata=thumbnail_file_meta_2, contentnode_id=file_contentnode_copy.id
+        )
 
         # Assert content_id is same after uploading thumbnails to nodes.
         file.contentnode.refresh_from_db()
@@ -548,5 +756,10 @@ def test_content_id__thumbnails_dont_update_content_id(self):
         original_node_content_id_after_upload = file.contentnode.content_id
         copied_node_content_id_after_upload = file_contentnode_copy.content_id
 
-        self.assertEqual(original_node_content_id_before_upload, original_node_content_id_after_upload)
-        self.assertEqual(copied_node_content_id_before_upload, copied_node_content_id_after_upload)
+        self.assertEqual(
+            original_node_content_id_before_upload,
+            original_node_content_id_after_upload,
+        )
+        self.assertEqual(
+            copied_node_content_id_before_upload, copied_node_content_id_after_upload
+        )
diff --git a/contentcuration/contentcuration/tests/viewsets/test_flagged.py b/contentcuration/contentcuration/tests/viewsets/test_flagged.py
index a507c5e4e9..1f2acf3ac2 100644
--- a/contentcuration/contentcuration/tests/viewsets/test_flagged.py
+++ b/contentcuration/contentcuration/tests/viewsets/test_flagged.py
@@ -10,13 +10,13 @@ class CRUDTestCase(StudioAPITestCase):
     @property
     def flag_feedback_object(self):
         return {
-            'context': {'spam': 'Spam or misleading'},
-            'contentnode_id': self.contentNode.id,
-            'content_id': self.contentNode.content_id,
-            'target_channel_id': self.channel.id,
-            'user': self.user.id,
-            'feedback_type': 'FLAGGED',
-            'feedback_reason': 'Some reason provided by the user'
+            "context": {"spam": "Spam or misleading"},
+            "contentnode_id": self.contentNode.id,
+            "content_id": self.contentNode.content_id,
+            "target_channel_id": self.channel.id,
+            "user": self.user.id,
+            "feedback_type": "FLAGGED",
+            "feedback_reason": "Some reason provided by the user",
         }
 
     def setUp(self):
@@ -34,17 +34,21 @@ def test_create_flag_event(self):
         self.client.force_authenticate(user=self.user)
         flagged_content = self.flag_feedback_object
         response = self.client.post(
-            reverse("flagged-list"), flagged_content, format="json",
+            reverse("flagged-list"),
+            flagged_content,
+            format="json",
         )
         self.assertEqual(response.status_code, 201, response.content)
 
     def test_create_flag_event_fails_for_flag_test_dev_feature_disabled(self):
         flagged_content = self.flag_feedback_object
-        self.user.feature_flags = {'test_dev_feature': False}
+        self.user.feature_flags = {"test_dev_feature": False}
         self.user.save()
         self.client.force_authenticate(user=self.user)
         response = self.client.post(
-            reverse("flagged-list"), flagged_content, format="json",
+            reverse("flagged-list"),
+            flagged_content,
+            format="json",
         )
         self.assertEqual(response.status_code, 403, response.content)
@@ -54,14 +58,18 @@ def test_create_flag_event_fails_for_flag_test_dev_feature_None(self):
         self.user.save()
         self.client.force_authenticate(user=self.user)
         response = self.client.post(
-            reverse("flagged-list"), flagged_content, format="json",
+            reverse("flagged-list"),
+            flagged_content,
+            format="json",
         )
         self.assertEqual(response.status_code, 403, response.content)
 
     def test_create_flag_event_fails_for_unauthorized_user(self):
         flagged_content = self.flag_feedback_object
         response = self.client.post(
-            reverse("flagged-list"), flagged_content, format="json",
+            reverse("flagged-list"),
+            flagged_content,
+            format="json",
         )
         self.assertEqual(response.status_code, 403, response.content)
@@ -76,16 +84,19 @@ def test_retreive_fails_for_normal_user(self):
         self.client.force_authenticate(user=self.user)
         flag_feedback_object = FlagFeedbackEvent.objects.create(
             **{
-                'context': {'spam': 'Spam or misleading'},
-                'contentnode_id': self.contentNode.id,
-                'content_id': self.contentNode.content_id,
-                'target_channel_id': self.channel.id,
-                'feedback_type': 'FLAGGED',
-                'feedback_reason': 'Some reason provided by the user'
+                "context": {"spam": "Spam or misleading"},
+                "contentnode_id": self.contentNode.id,
+                "content_id": self.contentNode.content_id,
+                "target_channel_id": self.channel.id,
+                "feedback_type": "FLAGGED",
+                "feedback_reason": "Some reason provided by the user",
             },
             user=self.user,
         )
-        response = self.client.get(reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), format="json")
+        response = self.client.get(
+            reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}),
+            format="json",
+        )
         self.assertEqual(response.status_code, 403, response.content)
 
     def test_list_fails_for_normal_user(self):
@@ -103,32 +114,38 @@ def test_destroy_flagged_content_super_admin(self):
         self.client.force_authenticate(self.user)
         flag_feedback_object = FlagFeedbackEvent.objects.create(
             **{
-                'context': {'spam': 'Spam or misleading'},
-                'contentnode_id': self.contentNode.id,
-                'content_id': self.contentNode.content_id,
-                'target_channel_id': self.channel.id,
-                'feedback_type': 'FLAGGED',
-                'feedback_reason': 'Some reason provided by the user'
+                "context": {"spam": "Spam or misleading"},
+                "contentnode_id": self.contentNode.id,
+                "content_id": self.contentNode.content_id,
+                "target_channel_id": self.channel.id,
+                "feedback_type": "FLAGGED",
+                "feedback_reason": "Some reason provided by the user",
             },
             user=self.user,
         )
-        response = self.client.delete(reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), format="json")
+        response = self.client.delete(
+            reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}),
+            format="json",
+        )
         self.assertEqual(response.status_code, 204, response.content)
 
     def test_destroy_flagged_content_fails_for_user_with_feature_flag_disabled(self):
-        self.user.feature_flags = {'test_dev_feature': False}
+        self.user.feature_flags = {"test_dev_feature": False}
         self.user.save()
         self.client.force_authenticate(user=self.user)
         flag_feedback_object = FlagFeedbackEvent.objects.create(
             **{
-                'context': {'spam': 'Spam or misleading'},
-                'contentnode_id': self.contentNode.id,
-                'content_id': self.contentNode.content_id,
-                'target_channel_id': self.channel.id,
-                'feedback_type': 'FLAGGED',
-                'feedback_reason': 'Some reason provided by the user'
+                "context": {"spam": "Spam or misleading"},
+                "contentnode_id": self.contentNode.id,
+                "content_id": self.contentNode.content_id,
+                "target_channel_id": self.channel.id,
+                "feedback_type": "FLAGGED",
+                "feedback_reason": "Some reason provided by the user",
             },
             user=self.user,
         )
-        response = self.client.delete(reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), format="json")
+        response = self.client.delete(
+            reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}),
+            format="json",
+        )
         self.assertEqual(response.status_code, 403, response.content)
diff --git a/contentcuration/contentcuration/tests/viewsets/test_invitation.py b/contentcuration/contentcuration/tests/viewsets/test_invitation.py
index fad9b52be4..f044f50a99 100644
--- a/contentcuration/contentcuration/tests/viewsets/test_invitation.py
+++ b/contentcuration/contentcuration/tests/viewsets/test_invitation.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import
-
 import uuid
 
 from django.urls import reverse
@@ -15,7 +13,6 @@
 class SyncTestCase(SyncTestMixin, StudioAPITestCase):
-
     @property
     def invitation_metadata(self):
         return {
@@ -45,7 +42,15 @@ def setUp(self):
     def test_create_invitation(self):
         invitation = self.invitation_metadata
         response = self.sync_changes(
-            [generate_create_event(invitation["id"], INVITATION, invitation, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_create_event(
+                    invitation["id"],
+                    INVITATION,
+                    invitation,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -58,8 +63,20 @@ def test_create_invitations(self):
         invitation2 = self.invitation_metadata
         response = self.sync_changes(
             [
-                generate_create_event(invitation1["id"], INVITATION, invitation1, channel_id=self.channel.id, user_id=self.invited_user.id),
-                generate_create_event(invitation2["id"], INVITATION, invitation2, channel_id=self.channel.id, user_id=self.invited_user.id),
+                generate_create_event(
+                    invitation1["id"],
+                    INVITATION,
+                    invitation1,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                ),
+                generate_create_event(
+                    invitation2["id"],
+                    INVITATION,
+                    invitation2,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                ),
             ],
         )
         self.assertEqual(response.status_code, 200, response.content)
@@ -79,7 +96,15 @@ def test_create_invitation_no_channel_permission(self):
         invitation = self.invitation_metadata
         invitation["channel"] = new_channel.id
         response = self.sync_changes(
-            [generate_create_event(invitation["id"], INVITATION, invitation, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_create_event(
+                    invitation["id"],
+                    INVITATION,
+                    invitation,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -94,7 +119,14 @@ def test_update_invitation_accept(self):
 
         self.client.force_authenticate(user=self.invited_user)
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {"accepted": True}, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {"accepted": True},
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -114,7 +146,15 @@ def test_update_invitation_revoke(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {"revoked": True}, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {"revoked": True},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -135,7 +175,15 @@ def test_update_invitation_invited_user_cannot_revoke(self):
 
         self.client.force_authenticate(user=self.invited_user)
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {"revoked": True}, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {"revoked": True},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         invitation = models.Invitation.objects.get(id=invitation.id)
@@ -149,7 +197,15 @@ def test_update_invitation_invited_user_cannot_accept_revoked_invitation(self):
 
         self.client.force_authenticate(user=self.invited_user)
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {"accepted": True}, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {"accepted": True},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         invitation = models.Invitation.objects.get(id=invitation.id)
@@ -160,7 +216,15 @@ def test_update_invitation_sender_cannot_modify_invited_user_fields(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {"accepted": True, "declined": True}, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {"accepted": True, "declined": True},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         invitation = models.Invitation.objects.get(id=invitation.id)
@@ -172,7 +236,15 @@ def test_update_invitation_decline(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {"declined": True}, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {"declined": True},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -190,7 +262,15 @@ def test_update_invitation_empty(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         response = self.sync_changes(
-            [generate_update_event(invitation.id, INVITATION, {}, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_update_event(
+                    invitation.id,
+                    INVITATION,
+                    {},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
@@ -200,7 +280,11 @@ def test_update_invitation_unwriteable_fields(self):
         response = self.sync_changes(
             [
                 generate_update_event(
-                    invitation.id, INVITATION, {"not_a_field": "not_a_value"}, channel_id=self.channel.id, user_id=self.invited_user.id
+                    invitation.id,
+                    INVITATION,
+                    {"not_a_field": "not_a_value"},
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
                 )
             ],
         )
@@ -211,7 +295,14 @@ def test_delete_invitation(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         response = self.sync_changes(
-            [generate_delete_event(invitation.id, INVITATION, channel_id=self.channel.id, user_id=self.invited_user.id)],
+            [
+                generate_delete_event(
+                    invitation.id,
+                    INVITATION,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         try:
@@ -227,8 +318,18 @@ def test_delete_invitations(self):
 
         response = self.sync_changes(
             [
-                generate_delete_event(invitation1.id, INVITATION, channel_id=self.channel.id, user_id=self.invited_user.id),
-                generate_delete_event(invitation2.id, INVITATION, channel_id=self.channel.id, user_id=self.invited_user.id),
+                generate_delete_event(
+                    invitation1.id,
+                    INVITATION,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                ),
+                generate_delete_event(
+                    invitation2.id,
+                    INVITATION,
+                    channel_id=self.channel.id,
+                    user_id=self.invited_user.id,
+                ),
             ],
         )
         self.assertEqual(response.status_code, 200, response.content)
@@ -275,7 +376,9 @@ def test_create_invitation(self):
         self.client.force_authenticate(user=self.user)
         invitation = self.invitation_metadata
         response = self.client.post(
-            reverse("invitation-list"), invitation, format="json",
+            reverse("invitation-list"),
+            invitation,
+            format="json",
         )
         self.assertEqual(response.status_code, 405, response.content)
@@ -283,7 +386,9 @@ def test_update_invitation_accept(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         self.client.force_authenticate(user=self.invited_user)
-        response = self.client.post(reverse("invitation-accept", kwargs={"pk": invitation.id}))
+        response = self.client.post(
+            reverse("invitation-accept", kwargs={"pk": invitation.id})
+        )
         self.assertEqual(response.status_code, 200, response.content)
         try:
             invitation = models.Invitation.objects.get(id=invitation.id)
@@ -324,7 +429,9 @@ def test_update_invitation_decline(self):
         invitation = models.Invitation.objects.create(**self.invitation_db_metadata)
 
         self.client.force_authenticate(user=self.invited_user)
-        response = self.client.post(reverse("invitation-decline", kwargs={"pk": invitation.id}))
+        response = self.client.post(
+            reverse("invitation-decline", kwargs={"pk": invitation.id})
+        )
         self.assertEqual(response.status_code, 200, response.content)
         try:
             invitation = models.Invitation.objects.get(id=invitation.id)
diff --git a/contentcuration/contentcuration/tests/viewsets/test_recommendations.py b/contentcuration/contentcuration/tests/viewsets/test_recommendations.py
new file mode 100644
index 0000000000..e792cfc75b
--- /dev/null
+++ b/contentcuration/contentcuration/tests/viewsets/test_recommendations.py
@@ -0,0 +1,489 @@
+import uuid
+
+from automation.utils.appnexus import errors
+from django.urls import reverse
+from le_utils.constants import content_kinds
+from mock import patch
+
+from contentcuration.models import RecommendationsEvent
+from contentcuration.models import RecommendationsInteractionEvent
+from contentcuration.tests import testdata
+from contentcuration.tests.base import StudioAPITestCase
+
+
+class RecommendationsCRUDTestCase(StudioAPITestCase):
+    @property
+    def topics(self):
+        return {
+            "topics": [
+                {
+                    "id": str(uuid.uuid4()),
+                    "channel_id": str(uuid.uuid4()),
+                    "title": "Target topic",
+                    "description": "Target description",
+                    "language": "en",
+                    "ancestors": [
+                        {
+                            "id": str(uuid.uuid4()),
+                            "title": "Parent topic",
+                            "description": "Parent description",
+                            "language": "en",
+                            "level": 1,
+                        }
+                    ],
+                }
+            ],
+            "metadata": {
+                "channel_id": "000",
+                "channel_title": "Channel title",
+                "some_additional_field": "some_random_value",
+            },
+        }
+
+    @property
+    def recommendations_list(self):
+        return [
+            {
+                "id": "00000000000000000000000000000001",
+                "node_id": "00000000000000000000000000000002",
+                "main_tree_id": "1",
+                "parent_id": "00000000000000000000000000000003",
+                "channel_id": "00000000000000000000000000000007",
+                "rank": 1,
+            },
+            {
+                "id": "00000000000000000000000000000004",
+                "node_id": "00000000000000000000000000000005",
+                "main_tree_id": "2",
+                "parent_id": "00000000000000000000000000000006",
+                "channel_id": "00000000000000000000000000000008",
+                "rank": 2,
+            },
+        ]
+
+    def setUp(self):
+        super(RecommendationsCRUDTestCase, self).setUp()
+
+    @patch(
+        "contentcuration.utils.automation_manager.AutomationManager.load_recommendations"
+    )
+    def test_recommend_success(self, mock_load_recommendations):
+        self.client.force_authenticate(user=self.admin_user)
+        mock_load_recommendations.return_value = self.recommendations_list
+
+        response = self.client.post(
+            reverse("recommendations"), data=self.topics, format="json"
+        )
+
+        self.assertEqual(response.status_code, 200, response.content)
+        self.assertEqual(response.json(), self.recommendations_list)
+
+    def test_recommend_invalid_data_empty_data(self):
+        self.client.force_authenticate(user=self.admin_user)
+
+        error_message = "Invalid request data. Please check the required fields."
+        invalid_data = {}
+        response = self.client.post(
+            reverse("recommendations"), data=invalid_data, format="json"
+        )
+        self.assertEqual(response.status_code, 400)
+        self.assertIn(error_message, response.json()["error"])
+
+    def test_recommend_invalid_data_wrong_topic_data(self):
+        self.client.force_authenticate(user=self.admin_user)
+
+        error_message = "Invalid request data. Please check the required fields."
+        invalid_data = {"topics": [{"ramdon_field": "random_value"}]}
+        response = self.client.post(
+            reverse("recommendations"), data=invalid_data, format="json"
+        )
+        self.assertEqual(response.status_code, 400)
+        self.assertEqual(error_message, response.json()["error"])
+
+    @patch(
+        "contentcuration.utils.automation_manager.AutomationManager.load_recommendations"
+    )
+    def test_recommendation_invalid_data_formats(self, mock_load_recommendations):
+        self.client.force_authenticate(user=self.admin_user)
+
+        error_message = "Invalid input provided."
+        mock_load_recommendations.side_effect = errors.InvalidRequest(error_message)
+
+        response = self.client.post(
+            reverse("recommendations"), data=self.topics, format="json"
+        )
+
+        self.assertEqual(response.status_code, 400)
+        self.assertEqual(response.json(), {"error": error_message})
+        mock_load_recommendations.assert_called_once()
+
+    @patch(
+        "contentcuration.utils.automation_manager.AutomationManager.load_recommendations"
+    )
+    def test_recommendation_service_unavailable(self, mock_load_recommendations):
+        self.client.force_authenticate(user=self.admin_user)
+
+        error_message = "Recommendation service unavailable"
+        mock_load_recommendations.side_effect = errors.ConnectionError(error_message)
+
+        response = self.client.post(
+            reverse("recommendations"), data=self.topics, format="json"
+        )
+
+        self.assertEqual(response.status_code, 503)
+        self.assertEqual(response.json(), {"error": error_message})
+        mock_load_recommendations.assert_called_once()
+
+    @patch(
+        "contentcuration.utils.automation_manager.AutomationManager.load_recommendations"
+    )
+    def test_recommendation_generic_error(self, mock_load_recommendations):
+        self.client.force_authenticate(user=self.admin_user)
+
+        error_message = "Unable to load recommendations"
+        mock_load_recommendations.side_effect = errors.HttpError(error_message)
+        response = self.client.post(
+            reverse("recommendations"), data=self.topics, format="json"
+        )
+
+        self.assertEqual(response.status_code, 500)
+        self.assertEqual(response.content.decode(), error_message)
+        mock_load_recommendations.assert_called_once()
+
+
+class RecommendationsEventViewSetTestCase(StudioAPITestCase):
+    @property
+    def recommendations_event_object(self):
+        return {
+            "context": {"model_version": 1, "breadcrumbs": "#Title#->Random"},
+            "contentnode_id": self.contentNode.id,
+            "content_id": self.contentNode.content_id,
+            "target_channel_id": self.channel.id,
+            "user": self.user.id,
+            "time_hidden": "2024-03-20T10:00:00Z",
+            "content": [
+                {
+                    "content_id": str(self.contentNode.content_id),
+                    "node_id": str(self.contentNode.id),
+                    "channel_id": str(self.channel.id),
+                    "score": 4,
+                }
+            ],
+        }
+
+    def setUp(self):
+        super(RecommendationsEventViewSetTestCase, self).setUp()
+        self.contentNode = testdata.node(
+            {
+                "kind_id": content_kinds.VIDEO,
+                "title": "Recommended Video content",
+            },
+        )
+        self.channel = testdata.channel()
+        self.user = testdata.user()
+        self.client.force_authenticate(user=self.user)
+
+    def test_create_recommendations_event(self):
+        recommendations_event = self.recommendations_event_object
+        response = self.client.post(
+            reverse("recommendations-list"),
+            recommendations_event,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 201, response.content)
+
+    def test_list_fails(self):
+        response = self.client.get(reverse("recommendations-list"), format="json")
+        self.assertEqual(response.status_code, 405, response.content)
+
+    def test_retrieve_fails(self):
+        recommendations_event = RecommendationsEvent.objects.create(
+            context={"model_version": 1, "breadcrumbs": "#Title#->Random"},
+            contentnode_id=self.contentNode.id,
+            content_id=self.contentNode.content_id,
+            target_channel_id=self.channel.id,
+            time_hidden="2024-03-20T10:00:00Z",
+            content=[
+                {
+                    "content_id": str(self.contentNode.content_id),
+                    "node_id": str(self.contentNode.id),
+                    "channel_id": str(self.channel.id),
+                    "score": 4,
+                }
+            ],
+            user=self.user,
+        )
+        response = self.client.get(
+            reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}),
+            format="json",
+        )
+        self.assertEqual(response.status_code, 405, response.content)
+
+    def test_update_recommendations_event(self):
+        recommendations_event = RecommendationsEvent.objects.create(
+            context={"model_version": 1, "breadcrumbs": "#Title#->Random"},
+            contentnode_id=self.contentNode.id,
+            content_id=self.contentNode.content_id,
+            target_channel_id=self.channel.id,
+            time_hidden="2024-03-20T10:00:00Z",
+            content=[
+                {
+                    "content_id": str(self.contentNode.content_id),
+                    "node_id": str(self.contentNode.id),
+                    "channel_id": str(self.channel.id),
+                    "score": 4,
+                }
+            ],
+            user=self.user,
+        )
+        updated_data = self.recommendations_event_object
+        updated_data["context"] = {
+            "model_version": 2,
+            "breadcrumbs": "#Title#->Updated",
+        }
+        response = self.client.put(
+            reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}),
+            updated_data,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 200, response.content)
+
+    def test_partial_update_recommendations_event(self):
+        recommendations_event = RecommendationsEvent.objects.create(
+            context={"model_version": 1, "breadcrumbs": "#Title#->Random"},
+            contentnode_id=self.contentNode.id,
+            content_id=self.contentNode.content_id,
+            target_channel_id=self.channel.id,
+            time_hidden="2024-03-20T10:00:00Z",
+            content=[
+                {
+                    "content_id": str(self.contentNode.content_id),
+                    "node_id": str(self.contentNode.id),
+                    "channel_id": str(self.channel.id),
+                    "score": 4,
+                }
+            ],
+            user=self.user,
+        )
+        response = self.client.patch(
+            reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}),
+            {"context": {"model_version": 2}},
+            format="json",
+        )
+        self.assertEqual(response.status_code, 200, response.content)
+
+    def test_destroy_recommendations_event(self):
+        recommendations_event = RecommendationsEvent.objects.create(
+            context={"model_version": 1, "breadcrumbs": "#Title#->Random"},
+            contentnode_id=self.contentNode.id,
+            content_id=self.contentNode.content_id,
+            target_channel_id=self.channel.id,
+            time_hidden="2024-03-20T10:00:00Z",
+            content=[
+                {
+                    "content_id": str(self.contentNode.content_id),
+                    "node_id": str(self.contentNode.id),
+                    "channel_id": str(self.channel.id),
+                    "score": 4,
+                }
+            ],
+            user=self.user,
+        )
+        response = self.client.delete(
+            reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}),
+            format="json",
+        )
+        self.assertEqual(response.status_code, 405, response.content)
+
+
+class RecommendationsInteractionEventViewSetTestCase(StudioAPITestCase):
+    @property
+    def recommendations_interaction_object(self):
+        return {
+            "context": {"test_key": "test_value"},
+            "contentnode_id": self.interaction_node.id,
+            "content_id": self.interaction_node.content_id,
+            "feedback_type": "IGNORED",
+            "feedback_reason": "----",
+            "recommendation_event_id": str(self.recommendation_event.id),
+        }
+
+    def setUp(self):
+        super(RecommendationsInteractionEventViewSetTestCase, self).setUp()
+        self.channel = testdata.channel()
+        self.user = testdata.user()
+        self.client.force_authenticate(user=self.user)
+        self.interaction_node = testdata.node(
+            {
+                "kind_id": content_kinds.VIDEO,
+                "title": "Recommended Video content",
+            },
+        )
+        self.node_where_import_is_initiated = testdata.node(
+            {
+                "kind_id": content_kinds.TOPIC,
+                "title": "Node where content is imported",
+            },
+        )
+        self.recommendation_event = RecommendationsEvent.objects.create(
+            user=self.user,
+            target_channel_id=self.channel.id,
+            content_id=self.node_where_import_is_initiated.content_id,
+            contentnode_id=self.node_where_import_is_initiated.id,
+            context={"model_version": 1, "breadcrumbs": "#Title#->Random"},
+            time_hidden="2024-03-20T10:00:00Z",
+            content=[
+                {
+                    "content_id": str(self.interaction_node.content_id),
+                    "node_id": str(self.interaction_node.id),
+                    "channel_id": str(self.channel.id),
+                    "score": 4,
+                }
+            ],
+        )
+
+    def test_create_recommendations_interaction(self):
+        recommendations_interaction = self.recommendations_interaction_object
+        response = self.client.post(
+            reverse("recommendations-interaction-list"),
+            recommendations_interaction,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 201, response.content)
+
+    def test_bulk_create_recommendations_interaction(self):
+        recommendations_interactions = [
+            {
+                "context": {"test_key": "test_value_1"},
+                "contentnode_id": self.interaction_node.id,
+                "content_id": self.interaction_node.content_id,
+                "feedback_type": "IGNORED",
+                "feedback_reason": "----",
+                "recommendation_event_id": str(self.recommendation_event.id),
+            },
+            {
+                "context": {"test_key": "test_value_2"},
+                "contentnode_id": self.interaction_node.id,
+                "content_id": self.interaction_node.content_id,
+                "feedback_type": "PREVIEWED",
+                "feedback_reason": "----",
+                "recommendation_event_id": str(self.recommendation_event.id),
+            },
+        ]
+        response = self.client.post(
+            reverse("recommendations-interaction-list"),
+            recommendations_interactions,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 201, response.content)
+        self.assertEqual(len(response.json()), len(recommendations_interactions))
+
+    def test_bulk_create_recommendations_interaction_failure(self):
+        # One valid, one invalid (missing required field)
+        recommendations_interactions = [
+            {
+                "context": {"test_key": "test_value_1"},
+                "contentnode_id": self.interaction_node.id,
+                "content_id": self.interaction_node.content_id,
+                "feedback_type": "IGNORED",
+                "feedback_reason": "----",
+                "recommendation_event_id": str(self.recommendation_event.id),
+            },
+            {
+                # Missing 'feedback_type'
+                "context": {"test_key": "test_value_2"},
+                "contentnode_id": self.interaction_node.id,
+                "content_id": self.interaction_node.content_id,
+                "feedback_reason": "----",
+                "recommendation_event_id": str(self.recommendation_event.id),
+            },
+        ]
+        response = self.client.post(
+            reverse("recommendations-interaction-list"),
+            recommendations_interactions,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 400, response.content)
+        self.assertIn("feedback_type", str(response.content))
+
+    def test_list_fails(self):
+        response = self.client.get(
+            reverse("recommendations-interaction-list"), format="json"
+        )
+        self.assertEqual(response.status_code, 405, response.content)
+
+    def test_retrieve_fails(self):
+        recommendations_interaction = RecommendationsInteractionEvent.objects.create(
+            context={"test_key": "test_value"},
+            contentnode_id=self.interaction_node.id,
+            content_id=self.interaction_node.content_id,
+            feedback_type="IGNORED",
+            feedback_reason="----",
+            recommendation_event_id=self.recommendation_event.id,
+        )
+        response = self.client.get(
+            reverse(
+                "recommendations-interaction-detail",
+                kwargs={"pk": recommendations_interaction.id},
+            ),
+            format="json",
+        )
+        self.assertEqual(response.status_code, 405, response.content)
+
+    def test_update_recommendations_interaction(self):
+        recommendations_interaction = RecommendationsInteractionEvent.objects.create(
+            context={"test_key": "test_value"},
+            contentnode_id=self.interaction_node.id,
+            content_id=self.interaction_node.content_id,
+            feedback_type="IGNORED",
+            feedback_reason="----",
+            recommendation_event_id=self.recommendation_event.id,
+        )
+        updated_data = self.recommendations_interaction_object
+        updated_data["feedback_type"] = "PREVIEWED"
+        response = self.client.put(
+            reverse(
+                "recommendations-interaction-detail",
+                kwargs={"pk": recommendations_interaction.id},
+            ),
+            updated_data,
+            format="json",
+        )
+        self.assertEqual(response.status_code, 200, response.content)
+
+    def test_partial_update_recommendations_interaction(self):
+        recommendations_interaction = RecommendationsInteractionEvent.objects.create(
+            context={"test_key": "test_value"},
+            contentnode_id=self.interaction_node.id,
+            content_id=self.interaction_node.content_id,
+            feedback_type="IGNORED",
+            feedback_reason="----",
+            recommendation_event_id=self.recommendation_event.id,
+        )
+        response = self.client.patch(
+            reverse(
+                "recommendations-interaction-detail",
+                kwargs={"pk": recommendations_interaction.id},
+            ),
+            {"feedback_type": "IMPORTED"},
+            format="json",
+        )
+        self.assertEqual(response.status_code, 200, response.content)
+
+    def test_destroy_recommendations_interaction(self):
+        recommendations_interaction = RecommendationsInteractionEvent.objects.create(
+            context={"test_key": "test_value"},
+            contentnode_id=self.interaction_node.id,
+            content_id=self.interaction_node.content_id,
+            feedback_type="IGNORED",
+            feedback_reason="----",
+            recommendation_event_id=self.recommendation_event.id,
+        )
+        response = self.client.delete(
+            reverse(
+                "recommendations-interaction-detail",
+                kwargs={"pk": recommendations_interaction.id},
+            ),
+            format="json",
+        )
+        self.assertEqual(response.status_code, 405, response.content)
diff --git a/contentcuration/contentcuration/tests/viewsets/test_user.py b/contentcuration/contentcuration/tests/viewsets/test_user.py
index 54c3f98ea5..5e8554f35a 100644
--- a/contentcuration/contentcuration/tests/viewsets/test_user.py
+++ b/contentcuration/contentcuration/tests/viewsets/test_user.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import
-
 from django.urls import reverse
 
 from contentcuration.tests import testdata
@@ -12,7 +10,6 @@
 class SyncTestCase(SyncTestMixin, StudioAPITestCase):
-
     def setUp(self):
         super(SyncTestCase, self).setUp()
         self.channel = testdata.channel()
@@ -25,8 +22,20 @@ def test_create_editor_and_viewer(self):
         self.client.force_authenticate(user=self.user)
         response = self.sync_changes(
             [
-                generate_create_event([editor.id, self.channel.id], EDITOR_M2M, {}, channel_id=self.channel.id, user_id=editor.id),
-                generate_create_event([viewer.id, self.channel.id], VIEWER_M2M, {}, channel_id=self.channel.id, user_id=viewer.id),
+                generate_create_event(
+                    [editor.id, self.channel.id],
+                    EDITOR_M2M,
+                    {},
+                    channel_id=self.channel.id,
+                    user_id=editor.id,
+                ),
+                generate_create_event(
+                    [viewer.id, self.channel.id],
+                    VIEWER_M2M,
+                    {},
+                    channel_id=self.channel.id,
+                    user_id=viewer.id,
+                ),
             ],
         )
         self.assertEqual(response.status_code, 200, response.content)
@@ -41,8 +50,18 @@ def test_delete_editor_and_viewer(self):
         self.client.force_authenticate(user=self.user)
         response = self.sync_changes(
             [
-                generate_delete_event([editor.id, self.channel.id], EDITOR_M2M, channel_id=self.channel.id, user_id=editor.id),
-                generate_delete_event([viewer.id, self.channel.id], VIEWER_M2M, channel_id=self.channel.id, user_id=viewer.id),
+                generate_delete_event(
+                    [editor.id, self.channel.id],
+                    EDITOR_M2M,
+                    channel_id=self.channel.id,
+                    user_id=editor.id,
+                ),
+                generate_delete_event(
+                    [viewer.id, self.channel.id],
+                    VIEWER_M2M,
+                    channel_id=self.channel.id,
+                    user_id=viewer.id,
+                ),
             ],
         )
         self.assertEqual(response.status_code, 200, response.content)
@@ -60,14 +79,19 @@ def setUp(self):
     def test_fetch_user(self):
         self.client.force_authenticate(user=self.user)
         response = self.client.get(
-            reverse("user-detail", kwargs={"pk": self.user.id}), format="json",
+            reverse("user-detail", kwargs={"pk": self.user.id}),
+            format="json",
         )
         self.assertEqual(response.status_code, 200, response.content)
 
     def test_no_create_user(self):
         self.client.force_authenticate(user=self.user)
         user = {}
-        response = self.client.post(reverse("user-list"), user, format="json",)
+        response = self.client.post(
+            reverse("user-list"),
+            user,
+            format="json",
+        )
         self.assertEqual(response.status_code, 405, response.content)
 
     def test_admin_no_create_user(self):
@@ -75,7 +99,11 @@ def test_admin_no_create_user(self):
         self.user.save()
         self.client.force_authenticate(user=self.user)
         user = {}
-        response = self.client.post(reverse("admin-users-list"), user, format="json",)
+        response = self.client.post(
+            reverse("admin-users-list"),
+            user,
+            format="json",
+        )
         self.assertEqual(response.status_code, 405, response.content)
 
     def test_no_update_user(self):
@@ -129,7 +157,9 @@ def setUp(self):
     def test_fetch_users(self):
         self.client.force_authenticate(user=self.user)
         response = self.client.get(
-            reverse("user-list"), data={"channel": self.channel.id}, format="json",
+            reverse("user-list"),
+            data={"channel": self.channel.id},
+            format="json",
        )
         self.assertEqual(response.status_code, 200, response.content)
@@ -137,7 +167,9 @@ def test_fetch_users_no_permissions(self):
         new_channel = testdata.channel()
         self.client.force_authenticate(user=self.user)
         response = self.client.get(
-            reverse("user-list"), data={"channel": new_channel.id}, format="json",
+            reverse("user-list"),
+            data={"channel": new_channel.id},
+            format="json",
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(response.json(), [])
diff --git a/contentcuration/contentcuration/urls.py b/contentcuration/contentcuration/urls.py
index 8047ca0bf4..59cf09e891 100644
--- a/contentcuration/contentcuration/urls.py
+++ b/contentcuration/contentcuration/urls.py
@@ -13,6 +13,8 @@
     1. Add an import:  from blog import urls as blog_urls
     2. Add a URL to urlpatterns:  re_path(r'^blog/', include(blog_urls))
 """
+import uuid
+
 import django_js_reverse.views as django_js_reverse_views
 from django.conf import settings
 from django.conf.urls.i18n import i18n_patterns
@@ -40,8 +42,11 @@
 from contentcuration.viewsets.clipboard import ClipboardViewSet
 from contentcuration.viewsets.contentnode import ContentNodeViewSet
 from contentcuration.viewsets.feedback import FlagFeedbackEventViewSet
+from contentcuration.viewsets.feedback import RecommendationsEventViewSet
+from contentcuration.viewsets.feedback import RecommendationsInteractionEventViewSet
 from contentcuration.viewsets.file import FileViewSet
 from contentcuration.viewsets.invitation import InvitationViewSet
+from contentcuration.viewsets.recommendation import RecommendationView
 from contentcuration.viewsets.sync.endpoint import SyncView
 from contentcuration.viewsets.user import AdminUserViewSet
 from contentcuration.viewsets.user import ChannelUserViewSet
@@ -50,44 +55,80 @@
 class StagingPageRedirectView(RedirectView):
     def get_redirect_url(self, *args, **kwargs):
-        channel_id = kwargs['channel_id']
-        return '/channels/{}/#/staging'.format(channel_id)
+        try:
+            channel_id = uuid.UUID(kwargs["channel_id"]).hex
+            return "/channels/{}/#/staging".format(channel_id)
+        except ValueError:
+            return None
 
 
 router = routers.DefaultRouter(trailing_slash=False)
-router.register(r'bookmark', BookmarkViewSet, basename="bookmark")
-router.register(r'channel', ChannelViewSet)
-router.register(r'channelset', ChannelSetViewSet)
-router.register(r'catalog', CatalogViewSet, basename='catalog')
-router.register(r'admin-channels', AdminChannelViewSet, basename='admin-channels')
-router.register(r'file', FileViewSet)
-router.register(r'channeluser', ChannelUserViewSet, basename="channeluser")
-router.register(r'user', UserViewSet)
-router.register(r'invitation', InvitationViewSet)
-router.register(r'contentnode', ContentNodeViewSet)
-router.register(r'assessmentitem', AssessmentItemViewSet)
-router.register(r'admin-users', AdminUserViewSet, basename='admin-users')
-router.register(r'clipboard', ClipboardViewSet, basename='clipboard')
-router.register(r'flagged', FlagFeedbackEventViewSet, basename='flagged')
+router.register(r"bookmark", BookmarkViewSet, basename="bookmark")
+router.register(r"channel", ChannelViewSet)
+router.register(r"channelset", ChannelSetViewSet)
+router.register(r"catalog", CatalogViewSet, basename="catalog")
+router.register(r"admin-channels", AdminChannelViewSet, basename="admin-channels")
+router.register(r"file", FileViewSet)
+router.register(r"channeluser", ChannelUserViewSet, basename="channeluser")
+router.register(r"user", UserViewSet)
+router.register(r"invitation", InvitationViewSet)
+router.register(r"contentnode", ContentNodeViewSet)
+router.register(r"assessmentitem", AssessmentItemViewSet)
+router.register(r"admin-users", AdminUserViewSet, basename="admin-users")
+router.register(r"clipboard", ClipboardViewSet, basename="clipboard")
+router.register(r"flagged", FlagFeedbackEventViewSet, basename="flagged")
+router.register(
+    r"recommendations", RecommendationsEventViewSet, basename="recommendations"
+)
+router.register(
+    r"recommendationsinteraction",
+    RecommendationsInteractionEventViewSet,
+    basename="recommendations-interaction",
+)
 
 urlpatterns = [
-    re_path(r'^api/', include(router.urls)),
-    re_path(r'^serviceWorker.js$', pwa.ServiceWorkerView.as_view(), name="service_worker"),
-    re_path(r'^healthz$', views.health, name='health'),
-    re_path(r'^stealthz$',
views.stealth, name='stealth'), - re_path(r'^api/search/', include('search.urls'), name='search'), - re_path(r'^api/probers/get_prober_channel', views.get_prober_channel, name='get_prober_channel'), - re_path(r'^api/probers/publishing_status', views.publishing_status, name='publishing_status'), - re_path(r'^api/probers/celery_worker_status', views.celery_worker_status, name='celery_worker_status'), - re_path(r'^api/probers/task_queue_status', views.task_queue_status, name='task_queue_status'), - re_path(r'^api/probers/unapplied_changes_status', views.unapplied_changes_status, name='unapplied_changes_status'), - re_path(r'^api/sync/$', SyncView.as_view(), name="sync"), + re_path(r"^api/", include(router.urls)), + re_path( + r"^serviceWorker.js$", pwa.ServiceWorkerView.as_view(), name="service_worker" + ), + re_path(r"^healthz$", views.health, name="health"), + re_path(r"^stealthz$", views.stealth, name="stealth"), + re_path(r"^api/search/", include("search.urls"), name="search"), + re_path( + r"^api/probers/get_prober_channel", + views.get_prober_channel, + name="get_prober_channel", + ), + re_path( + r"^api/probers/publishing_status", + views.publishing_status, + name="publishing_status", + ), + re_path( + r"^api/probers/celery_worker_status", + views.celery_worker_status, + name="celery_worker_status", + ), + re_path( + r"^api/probers/task_queue_status", + views.task_queue_status, + name="task_queue_status", + ), + re_path( + r"^api/probers/unapplied_changes_status", + views.unapplied_changes_status, + name="unapplied_changes_status", + ), + re_path(r"^api/sync/$", SyncView.as_view(), name="sync"), + re_path( + r"^api/recommendations/$", RecommendationView.as_view(), name="recommendations" + ), ] # if activated, turn on django prometheus urls if "django_prometheus" in settings.INSTALLED_APPS: urlpatterns += [ - re_path('', include('django_prometheus.urls')), + re_path("", include("django_prometheus.urls")), ] @@ -96,77 +137,216 @@ def get_redirect_url(self, *args, **kwargs): # Add node api endpoints urlpatterns += [ - re_path(r'^api/get_channel_details/(?P<channel_id>[^/]*)$', node_views.get_channel_details, name='get_channel_details'), - re_path(r'^api/get_node_details/(?P<node_id>[^/]*)$', node_views.get_node_details, name='get_node_details'), - re_path(r'^api/get_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$', node_views.get_node_diff, name='get_node_diff'), - re_path(r'^api/generate_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$', node_views.generate_node_diff, name='generate_node_diff'), + re_path( + r"^api/get_channel_details/(?P<channel_id>[^/]*)$", + node_views.get_channel_details, + name="get_channel_details", + ), + re_path( + r"^api/get_node_details/(?P<node_id>[^/]*)$", + node_views.get_node_details, + name="get_node_details", + ), + re_path( + r"^api/get_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$", + node_views.get_node_diff, + name="get_node_diff", + ), + re_path( + r"^api/generate_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$", + node_views.generate_node_diff, + name="generate_node_diff", + ), ] # Add file api endpoints urlpatterns += [ - re_path(r'^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"), + re_path( + r"^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)", + zip_views.ZipContentView.as_view(), + {}, + "zipcontent", + ), ] # Add settings endpoints urlpatterns += [ - re_path(r'^api/delete_user_account/$', settings_views.DeleteAccountView.as_view(), name='delete_user_account'), - re_path(r'^api/export_user_data/$', settings_views.export_user_data, name='export_user_data'), - re_path(r'^api/change_password/$',
settings_views.UserPasswordChangeView.as_view(), name='change_password'), - re_path(r'^api/update_user_full_name/$', settings_views.UsernameChangeView.as_view(), name='update_user_full_name'), - re_path(r'^settings/issues', settings_views.IssuesSettingsView.as_view(), name='issues_settings'), - re_path(r'^settings/request_storage', settings_views.StorageSettingsView.as_view(), name='request_storage'), - re_path(r'^policies/update', settings_views.PolicyAcceptView.as_view(), name='policy_update'), + re_path( + r"^api/delete_user_account/$", + settings_views.DeleteAccountView.as_view(), + name="delete_user_account", + ), + re_path( + r"^api/export_user_data/$", + settings_views.export_user_data, + name="export_user_data", + ), + re_path( + r"^api/change_password/$", + settings_views.UserPasswordChangeView.as_view(), + name="change_password", + ), + re_path( + r"^api/update_user_full_name/$", + settings_views.UsernameChangeView.as_view(), + name="update_user_full_name", + ), + re_path( + r"^settings/issues", + settings_views.IssuesSettingsView.as_view(), + name="issues_settings", + ), + re_path( + r"^settings/request_storage", + settings_views.StorageSettingsView.as_view(), + name="request_storage", + ), + re_path( + r"^policies/update", + settings_views.PolicyAcceptView.as_view(), + name="policy_update", + ), ] # Add internal endpoints urlpatterns += [ - re_path(r'^api/internal/authenticate_user_internal$', internal_views.authenticate_user_internal, name="authenticate_user_internal"), - re_path(r'^api/internal/check_version$', internal_views.check_version, name="check_version"), - re_path(r'^api/internal/file_diff$', internal_views.file_diff, name="file_diff"), - re_path(r'^api/internal/file_upload$', internal_views.api_file_upload, name="api_file_upload"), - re_path(r'^api/internal/publish_channel$', internal_views.api_publish_channel, name="api_publish_channel"), - re_path(r'^api/internal/check_user_is_editor$', internal_views.check_user_is_editor, name='check_user_is_editor'), - re_path(r'^api/internal/get_tree_data$', internal_views.get_tree_data, name='get_tree_data'), - re_path(r'^api/internal/get_node_tree_data$', internal_views.get_node_tree_data, name='get_node_tree_data'), - re_path(r'^api/internal/create_channel$', internal_views.api_create_channel_endpoint, name="api_create_channel"), - re_path(r'^api/internal/add_nodes$', internal_views.api_add_nodes_to_tree, name="api_add_nodes_to_tree"), - re_path(r'^api/internal/finish_channel$', internal_views.api_commit_channel, name="api_finish_channel"), - re_path(r'^api/internal/get_channel_status_bulk$', internal_views.get_channel_status_bulk, name="get_channel_status_bulk"), + re_path( + r"^api/internal/authenticate_user_internal$", + internal_views.authenticate_user_internal, + name="authenticate_user_internal", + ), + re_path( + r"^api/internal/check_version$", + internal_views.check_version, + name="check_version", + ), + re_path(r"^api/internal/file_diff$", internal_views.file_diff, name="file_diff"), + re_path( + r"^api/internal/file_upload$", + internal_views.api_file_upload, + name="api_file_upload", + ), + re_path( + r"^api/internal/publish_channel$", + internal_views.api_publish_channel, + name="api_publish_channel", + ), + re_path( + r"^api/internal/check_user_is_editor$", + internal_views.check_user_is_editor, + name="check_user_is_editor", + ), + re_path( + r"^api/internal/get_tree_data$", + internal_views.get_tree_data, + name="get_tree_data", + ), + re_path( + r"^api/internal/get_node_tree_data$", + 
internal_views.get_node_tree_data, + name="get_node_tree_data", + ), + re_path( + r"^api/internal/create_channel$", + internal_views.api_create_channel_endpoint, + name="api_create_channel", + ), + re_path( + r"^api/internal/add_nodes$", + internal_views.api_add_nodes_to_tree, + name="api_add_nodes_to_tree", + ), + re_path( + r"^api/internal/finish_channel$", + internal_views.api_commit_channel, + name="api_finish_channel", + ), + re_path( + r"^api/internal/get_channel_status_bulk$", + internal_views.get_channel_status_bulk, + name="get_channel_status_bulk", + ), ] # Add admin endpoints urlpatterns += [ - re_path(r'^api/send_custom_email/$', admin_views.send_custom_email, name='send_custom_email'), + re_path( + r"^api/send_custom_email/$", + admin_views.send_custom_email, + name="send_custom_email", + ), ] -urlpatterns += [re_path(r'^jsreverse/$', django_js_reverse_views.urls_js, name='js_reverse')] +urlpatterns += [ + re_path(r"^jsreverse/$", django_js_reverse_views.urls_js, name="js_reverse") +] # I18N Endpoints urlpatterns += [ - re_path(r'^i18n/', include('django.conf.urls.i18n')), + re_path(r"^i18n/", include("django.conf.urls.i18n")), ] # Include all URLS prefixed by language urlpatterns += i18n_patterns( - re_path(r'^$', views.base, name='base'), + re_path(r"^$", views.base, name="base"), re_path(r"^i18n/setlang/$", views.set_language, name="set_language"), - re_path(r'^channels/$', views.channel_list, name='channels'), + re_path(r"^channels/$", views.channel_list, name="channels"), # Redirect deprecated staging URL to new URL - re_path(r'^channels/(?P<channel_id>[^/]{32})/staging/$', StagingPageRedirectView.as_view(), name='staging_redirect'), - re_path(r'^channels/(?P<channel_id>[^/]{32})/$', views.channel, name='channel'), - re_path(r'^accounts/login/$', registration_views.login, name='login'), - re_path(r'^accounts/logout/$', registration_views.logout, name='logout'), - re_path(r'^accounts/request_activation_link/$', registration_views.request_activation_link, name='request_activation_link'), + re_path( + r"^channels/(?P<channel_id>[^/]{32})/staging/$", + StagingPageRedirectView.as_view(), + name="staging_redirect", + ), + re_path(r"^channels/(?P<channel_id>[^/]{32})/$", views.channel, name="channel"), + re_path(r"^accounts/login/$", registration_views.login, name="login"), + re_path(r"^accounts/logout/$", registration_views.logout, name="logout"), + re_path( + r"^accounts/request_activation_link/$", + registration_views.request_activation_link, + name="request_activation_link", + ), re_path(r"^accounts/$", views.accounts, name="accounts"), - path(r'accounts/password/reset/', registration_views.UserPasswordResetView.as_view(), name='auth_password_reset'), - path(r'accounts/password/reset/confirm/<uidb64>/<token>/', registration_views.UserPasswordResetConfirmView.as_view(), name='auth_password_reset_confirm'), - re_path(r'^accounts/register/$', registration_views.UserRegistrationView.as_view(), name='register'), - re_path(r'^activate/(?P<activation_key>[-:\w]+)/$', registration_views.UserActivationView.as_view(), name='registration_activate'), - re_path(r'^api/send_invitation_email/$', registration_views.send_invitation_email, name='send_invitation_email'), - re_path(r'^new/accept_invitation/(?P<email>[^/]+)/', registration_views.new_user_redirect, name="accept_invitation_and_registration"), - re_path(r'^api/deferred_user_space_by_kind/$', registration_views.deferred_user_space_by_kind, name="deferred_user_space_by_kind"), - re_path(r'^api/deferred_user_api_token/$', registration_views.deferred_user_api_token, name="deferred_user_api_token"), -
re_path(r'^settings/$', settings_views.settings, name='settings'), - re_path(r'^administration/', admin_views.administration, name='administration'), - re_path(r'^manifest.webmanifest$', pwa.ManifestView.as_view(), name="manifest"), + path( + r"accounts/password/reset/", + registration_views.UserPasswordResetView.as_view(), + name="auth_password_reset", + ), + path( + r"accounts/password/reset/confirm/<uidb64>/<token>/", + registration_views.UserPasswordResetConfirmView.as_view(), + name="auth_password_reset_confirm", + ), + re_path( + r"^accounts/register/$", + registration_views.UserRegistrationView.as_view(), + name="register", + ), + re_path( + r"^activate/(?P<activation_key>[-:\w]+)/$", + registration_views.UserActivationView.as_view(), + name="registration_activate", + ), + re_path( + r"^api/send_invitation_email/$", + registration_views.send_invitation_email, + name="send_invitation_email", + ), + re_path( + r"^new/accept_invitation/(?P<email>[^/]+)/", + registration_views.new_user_redirect, + name="accept_invitation_and_registration", + ), + re_path( + r"^api/deferred_user_space_by_kind/$", + registration_views.deferred_user_space_by_kind, + name="deferred_user_space_by_kind", + ), + re_path( + r"^api/deferred_user_api_token/$", + registration_views.deferred_user_api_token, + name="deferred_user_api_token", + ), + re_path(r"^settings/$", settings_views.settings, name="settings"), + re_path(r"^administration/", admin_views.administration, name="administration"), + re_path(r"^manifest.webmanifest$", pwa.ManifestView.as_view(), name="manifest"), ) diff --git a/contentcuration/contentcuration/utils/assessment/__init__.py b/contentcuration/contentcuration/utils/assessment/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/contentcuration/contentcuration/utils/assessment/base.py b/contentcuration/contentcuration/utils/assessment/base.py new file mode 100644 index 0000000000..c78547707b --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/base.py @@ -0,0 +1,381 @@ +import hashlib +import json +import logging +import os +import re +import zipfile +from abc import ABC +from abc import abstractmethod +from io import BytesIO +from tempfile import NamedTemporaryFile +from tempfile import TemporaryDirectory + +from django.core.files import File +from django.core.files.storage import default_storage as storage +from le_utils.constants import exercises +from PIL import Image + +from contentcuration import models + + +image_pattern = rf"!\[([^\]]*)]\(\${exercises.CONTENT_STORAGE_PLACEHOLDER}/([^\s)]+)(?:\s=([0-9\.]+)x([0-9\.]+))*[^)]*\)" + + +def resize_image(image_content, width, height): + try: + with Image.open(BytesIO(image_content)) as img: + original_format = img.format + img = img.resize((int(width), int(height)), Image.LANCZOS) + buffered = BytesIO() + img.save(buffered, format=original_format) + return buffered.getvalue() + except Exception as e: + logging.warning(f"Error resizing image: {str(e)}") + return None + + +def get_resized_image_checksum(image_content): + return hashlib.md5(image_content).hexdigest() + + +class ExerciseArchiveGenerator(ABC): + """ + Abstract base class for exercise zip generators. + Handles common functionality for creating exercise zip files for different formats.
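+ + Illustrative subclass sketch (hypothetical names, included only to show the contract; the real implementations are PerseusExerciseGenerator and QTIExerciseGenerator): + + class MyGenerator(ExerciseArchiveGenerator): + file_format = "zip" + preset = "my_preset" + + def get_image_file_path(self): + return "images" + + def get_image_ref_prefix(self): + return "images" + + def create_assessment_item(self, assessment_item, processed_data): + return f"{assessment_item.assessment_id}.json", b"{}"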
+ """ + + ZIP_DATE_TIME = (2015, 10, 21, 7, 28, 0) + ZIP_COMPRESS_TYPE = zipfile.ZIP_DEFLATED + ZIP_COMMENT = "".encode() + + @property + @abstractmethod + def file_format(self): + pass + + @property + @abstractmethod + def preset(self): + pass + + @abstractmethod + def get_image_file_path(self): + """ + Abstract method to get the archive file path for storing assessment image files. + + Returns: + str: The file path for images in the exercise archive + """ + pass + + @abstractmethod + def get_image_ref_prefix(self): + """ + A value to insert in front of the image path - this adds both the special placeholder + that our Perseus viewer uses to find images, and the relative path to the images directory. + """ + pass + + @abstractmethod + def create_assessment_item(self, assessment_item, processed_data): + """ + Abstract method to create an assessment item from processed data. + Args: + assessment_item: The assessment item to process + processed_data: Data processed from the assessment item + Returns: + filepath: Path for the created assessment item file + file_content: Content of the assessment item file + """ + pass + + def __init__( + self, ccnode, exercise_data, channel_id, default_language, user_id=None + ): + """ + Initialize the exercise zip generator. + + Args: + ccnode: Content node containing exercise data + exercise_data: Data specific to the exercise format + user_id: Optional user ID for tracking who created the exercise + """ + self.ccnode = ccnode + self.exercise_data = exercise_data + self.channel_id = channel_id + self.default_language = default_language + self.user_id = user_id + self.resized_images_map = {} + self.assessment_items = [] + self.files_to_write = [] + self.tempdir = None + + def write_to_zipfile(self, zf, filepath, content): + """ + This method is a copy of the write_file_to_zip_with_neutral_metadata function from ricecooker. + The comment, date_time, and compress_type are parameterized to allow for Perseus to override them. + This can be updated in future when we have a good way to avoid rebuilding perseus files, unless needed. + """ + filepath = filepath.replace("\\", "/") + info = zipfile.ZipInfo(filepath, date_time=self.ZIP_DATE_TIME) + info.comment = self.ZIP_COMMENT + info.compress_type = self.ZIP_COMPRESS_TYPE + info.create_system = 0 + zf.writestr(info, content) + + def add_file_to_write(self, filepath, content): + if self.tempdir is None: + raise RuntimeError( + "Cannot add files to write before creating the temporary directory." 
+ ) + full_path = os.path.join(self.tempdir, filepath) + if os.path.exists(full_path): + return + os.makedirs(os.path.dirname(full_path), exist_ok=True) + with open(full_path, "wb") as f: + f.write(content) + self.files_to_write.append(full_path) + + def _add_original_image(self, checksum, filename, new_file_path): + """Extract original image handling""" + with storage.open( + models.generate_object_storage_name(checksum, filename), "rb" + ) as imgfile: + original_content = imgfile.read() + self.add_file_to_write(os.path.join(new_file_path, filename), original_content) + + def _get_similar_image(self, filename, width, height): + if filename not in self.resized_images_map: + self.resized_images_map[filename] = {} + return None + if (width, height) in self.resized_images_map[filename]: + return self.resized_images_map[filename][(width, height)] + + for key, resized_image in self.resized_images_map[filename].items(): + if ( + abs(key[0] - width) / width < 0.01 + and abs(key[1] - height) / height < 0.01 + ): + return resized_image + + def _resize_image(self, checksum, ext, filename, width, height, new_file_path): + with storage.open( + models.generate_object_storage_name(checksum, filename), + "rb", + ) as imgfile: + original_content = imgfile.read() + + resized_content = resize_image(original_content, width, height) + + if not resized_content: + logging.warning(f"Failed to resize image {filename}. Using original image.") + return + resized_checksum = get_resized_image_checksum(resized_content) + + new_img_ref = f"{resized_checksum}{ext}" + self.resized_images_map[filename][(width, height)] = new_img_ref + self.add_file_to_write( + os.path.join(new_file_path, new_img_ref), resized_content + ) + return new_img_ref + + def _process_single_image( + self, filename, checksum, ext, width, height, new_file_path + ): + if width is None and height is None: + # No resizing needed, just add original + self._add_original_image(checksum, filename, new_file_path) + return filename + + # Try to get similar or create resized image + similar_image = self._get_similar_image(filename, width, height) + if similar_image: + return similar_image + + resized_image = self._resize_image( + checksum, ext, filename, width, height, new_file_path + ) + return resized_image or filename + + def _is_valid_image_filename(self, filename): + checksum, ext = os.path.splitext(filename) + + if not ext: + logging.warning( + "While publishing channel `{}` a filename with no extension was encountered: `{}`".format( + self.channel_id, filename + ) + ) + return False + + try: + int(checksum, 16) # Validate hex checksum + return True + except ValueError: + logging.warning( + "while publishing channel `{}` a filename with an improper checksum was encountered: `{}`".format( + self.channel_id, filename + ) + ) + if os.environ.get("BRANCH_ENVIRONMENT", "") != "master": + raise + return False + + def process_image_strings(self, content): + new_file_path = self.get_image_file_path() + new_image_path = self.get_image_ref_prefix() + image_list = [] + + def _replace_image(img_match): + # Add any image files that haven't been written to the zipfile + filename = img_match.group(2) + width = float(img_match.group(3)) if img_match.group(3) else None + height = float(img_match.group(4)) if img_match.group(4) else None + checksum, ext = os.path.splitext(filename) + + if not self._is_valid_image_filename(filename): + return "" + + if width == 0 or height == 0: + # Can't resize an image to 0 width or height, so just ignore. 
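+ # (For reference, a matched image string looks like + # ![alt](${CONTENT_STORAGE_PLACEHOLDER}/abcdef0123456789.png =300.0x200.0) + # where group(2) is the checksum-named file and groups 3/4 are the optional + # width/height resize hints; the values shown are illustrative.)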
+ return "" + + processed_filename = self._process_single_image( + filename, checksum, ext, width, height, new_file_path + ) + + if width is not None and height is not None: + image_list.append( + { + "name": f"{new_image_path}/{processed_filename}", + "width": width, + "height": height, + } + ) + return f"![{img_match.group(1)}]({new_image_path}/{processed_filename})" + + content = re.sub(image_pattern, _replace_image, content) + + return content, image_list + + def _process_content(self, content): + """ + Process the content to handle images. + + Args: + content: The content string to process + + Returns: + tuple: Processed content and list of image data + """ + return self.process_image_strings(content) + + def _sort_by_order(self, items, item_type): + try: + return sorted(items, key=lambda x: x.get("order")) + except TypeError: + logging.error(f"Unable to sort {item_type}, leaving unsorted.") + return items + + def _process_answers(self, assessment_item): + answer_data = json.loads(assessment_item.answers) + processed_answers = [] + + for answer in answer_data: + if answer["answer"]: + if isinstance(answer["answer"], str): + (answer["answer"], answer_images,) = self._process_content( + answer["answer"], + ) + answer["images"] = answer_images + + processed_answers.append(answer) + + return self._sort_by_order(processed_answers, "answers") + + def _process_hints(self, assessment_item): + hint_data = json.loads(assessment_item.hints) + + for hint in hint_data: + hint["hint"], hint_images = self._process_content( + hint["hint"], + ) + hint["images"] = hint_images + + return self._sort_by_order(hint_data, "hints") + + def process_assessment_item(self, assessment_item): + # Process question + question, question_images = self._process_content( + assessment_item.question, + ) + + # Process answers and hints + processed_answers = self._process_answers(assessment_item) + processed_hints = self._process_hints(assessment_item) + + new_file_path = self.get_image_file_path() + new_image_path = f"{exercises.IMG_PLACEHOLDER}/{new_file_path}" + context = { + "question": question, + "question_images": question_images, + "answers": processed_answers, + "multiple_select": assessment_item.type == exercises.MULTIPLE_SELECTION, + "raw_data": assessment_item.raw_data.replace( + exercises.CONTENT_STORAGE_PLACEHOLDER, new_image_path + ), + "hints": processed_hints, + "randomize": assessment_item.randomize, + } + filepath, file_content = self.create_assessment_item(assessment_item, context) + self.add_file_to_write(filepath, file_content) + + def handle_before_assessment_items(self): + pass + + def handle_after_assessment_items(self): + pass + + def _create_zipfile(self): + filename = "{0}.{ext}".format(self.ccnode.title, ext=self.file_format) + with NamedTemporaryFile(suffix="zip") as tempf: + with zipfile.ZipFile(tempf.name, "w") as zf: + for file_path in self.files_to_write: + with open(file_path, "rb") as f: + self.write_to_zipfile( + zf, + os.path.relpath(file_path, self.tempdir), + f.read(), + ) + file_size = tempf.tell() + tempf.flush() + + self.ccnode.files.filter(preset_id=self.preset).delete() + + assessment_file_obj = models.File.objects.create( + file_on_disk=File(open(tempf.name, "rb"), name=filename), + contentnode=self.ccnode, + file_format_id=self.file_format, + preset_id=self.preset, + original_filename=filename, + file_size=file_size, + uploaded_by_id=self.user_id, + ) + logging.debug( + "Created exercise for {0} with checksum {1}".format( + self.ccnode.title, assessment_file_obj.checksum + ) + 
) + + def create_exercise_archive(self): + with TemporaryDirectory() as tempdir: + self.tempdir = tempdir + self.handle_before_assessment_items() + for question in ( + self.ccnode.assessment_items.prefetch_related("files") + .all() + .order_by("order") + ): + self.process_assessment_item(question) + self.handle_after_assessment_items() + self._create_zipfile() diff --git a/contentcuration/contentcuration/utils/assessment/markdown.py b/contentcuration/contentcuration/utils/assessment/markdown.py new file mode 100644 index 0000000000..c34da5dee1 --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/markdown.py @@ -0,0 +1,135 @@ +import re +import xml.etree.ElementTree as ET + +from latex2mathml.converter import convert +from markdown_it import MarkdownIt +from markdown_it.renderer import RendererProtocol +from markdown_it.rules_block import StateBlock +from markdown_it.rules_inline import StateInline +from markdown_it.token import Token +from markdown_it.utils import EnvType +from markdown_it.utils import OptionsDict + +from contentcuration.utils.assessment.qti.mathml.core import Annotation +from contentcuration.utils.assessment.qti.mathml.core import Semantics + + +# Regex patterns for $$ delimited math +INLINE_PATTERN = re.compile(r"^\$\$([\s\S]+?)\$\$") +BLOCK_PATTERN = re.compile(r"^\$\$([\s\S]+?)\$\$", re.M) + + +def math_inline_func(state: StateInline, silent: bool) -> bool: + """Parse inline math: $$expression$$""" + if not state.src.startswith("$$", state.pos): + return False + + match = INLINE_PATTERN.match(state.src[state.pos :]) + if not match: + return False + + if not silent: + token = state.push("math_inline", "math", 0) + token.content = match.group(1) + token.markup = "$$" + + state.pos += match.end() + return True + + +def math_block_func( + state: StateBlock, begLine: int, endLine: int, silent: bool +) -> bool: + """Parse block math: $$expression$$""" + begin = state.bMarks[begLine] + state.tShift[begLine] + + if not state.src.startswith("$$", begin): + return False + + match = BLOCK_PATTERN.match(state.src[begin:]) + if not match: + return False + + if not silent: + token = state.push("math_block", "math", 0) + token.block = True + token.content = match.group(1) + token.markup = "$$" + + # Advance to next line after the math block + endpos = begin + match.end() - 1 + line = begLine + while line < endLine: + if endpos >= state.bMarks[line] and endpos <= state.eMarks[line]: + state.line = line + 1 + break + line += 1 + + return True + + +def _convert(latex, inline=True): + # Remove the namespace declaration for cleaner output + markup = convert(latex, display="inline" if inline else "block").replace( + ' xmlns="http://www.w3.org/1998/Math/MathML"', "" + ) + # By default latex2mathml encodes operators that don't need to be encoded + # so we parse it with ElementTree and turn it back into a string here for consistency. 
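+ # For example (illustrative output; exact markup depends on latex2mathml), + # _convert("x^2") returns something like: + # <math display="inline"><semantics><mrow><msup><mi>x</mi><mn>2</mn></msup></mrow> + # <annotation encoding="application/x-tex">x^2</annotation></semantics></math>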
+ math_element = ET.fromstring(markup) + + # Create LaTeX annotation + latex_annotation_element = Annotation( + encoding="application/x-tex", children=[latex] + ).to_element() + + semantics_element = Semantics().to_element() + for child in math_element: + math_element.remove(child) + semantics_element.append(child) + semantics_element.append(latex_annotation_element) + math_element.append(semantics_element) + + return ET.tostring(math_element, encoding="unicode") + + +def render_math_inline( + self: RendererProtocol, + tokens: list[Token], + idx: int, + options: OptionsDict, + env: EnvType, +) -> str: + """Render inline math to MathML""" + return _convert(tokens[idx].content) + + +def render_math_block( + self: RendererProtocol, + tokens: list[Token], + idx: int, + options: OptionsDict, + env: EnvType, +) -> str: + """Render block math to MathML""" + return _convert(tokens[idx].content, inline=False) + + +def texmath_to_mathml_plugin(md: MarkdownIt) -> None: + """Simple plugin for parsing TeX math with $$ delimiters. + + Converts inline and block math expressions to MathML using latex2mathml. + """ + # Register parsing rules + md.inline.ruler.before("escape", "math_inline", math_inline_func) + md.block.ruler.before("fence", "math_block", math_block_func) + + # Register renderers + md.add_render_rule("math_inline", render_math_inline) + md.add_render_rule("math_block", render_math_block) + + +md = MarkdownIt("gfm-like").disable("linkify").use(texmath_to_mathml_plugin) + + +def render_markdown(markdown): + return md.render(markdown) diff --git a/contentcuration/contentcuration/utils/assessment/perseus.py b/contentcuration/contentcuration/utils/assessment/perseus.py new file mode 100644 index 0000000000..e96ebbae49 --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/perseus.py @@ -0,0 +1,131 @@ +import json +import re +import zipfile + +from django.core.files.storage import default_storage as storage +from django.template.loader import render_to_string +from le_utils.constants import exercises +from le_utils.constants import file_formats +from le_utils.constants import format_presets + +from contentcuration import models +from contentcuration.utils.assessment.base import ExerciseArchiveGenerator +from contentcuration.utils.parser import extract_value + + +_DOUBLE_DOLLAR_RE = re.compile(r"\$\$(.+?)\$\$", flags=re.DOTALL) + + +class PerseusExerciseGenerator(ExerciseArchiveGenerator): + """ + Exercise zip generator for Perseus format exercises. + """ + + ZIP_DATE_TIME = (2013, 3, 14, 1, 59, 26) + ZIP_COMPRESS_TYPE = zipfile.ZIP_STORED + ZIP_COMMENT = "Perseus file generated during export process".encode() + + file_format = file_formats.PERSEUS + preset = format_presets.EXERCISE + + TEMPLATE_MAP = { + exercises.MULTIPLE_SELECTION: "perseus/multiple_selection.json", + exercises.SINGLE_SELECTION: "perseus/multiple_selection.json", + exercises.INPUT_QUESTION: "perseus/input_question.json", + exercises.PERSEUS_QUESTION: "perseus/perseus_question.json", + "true_false": "perseus/multiple_selection.json", + } + + def _write_raw_perseus_image_files(self, assessment_item): + # For raw perseus JSON questions, the files must be + # specified in advance. + + # Files have been prefetched when the assessment item was + # queried, so take advantage of that. 
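+ # Note: each graphie file bundles an SVG and a JSON payload separated by + # GRAPHIE_DELIMITER; the loop below splits them into separate + # "<name>.svg" and "<name>-data.json" archive entries.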
+ files = sorted(assessment_item.files.all(), key=lambda x: x.checksum) + image_files = filter( + lambda x: x.preset_id == format_presets.EXERCISE_IMAGE, files + ) + graphie_files = filter( + lambda x: x.preset_id == format_presets.EXERCISE_GRAPHIE, files + ) + images_path = self.get_image_file_path() + for image in image_files: + image_name = "{}/{}.{}".format( + images_path, image.checksum, image.file_format_id + ) + with storage.open( + models.generate_object_storage_name(image.checksum, str(image)), + "rb", + ) as content: + self.add_file_to_write(image_name, content.read()) + + for image in graphie_files: + svg_name = "{}/{}.svg".format(images_path, image.original_filename) + json_name = "{}/{}-data.json".format(images_path, image.original_filename) + with storage.open( + models.generate_object_storage_name(image.checksum, str(image)), + "rb", + ) as content: + content = content.read() + # in Python 3, delimiter needs to be in bytes format + content = content.split(exercises.GRAPHIE_DELIMITER.encode("ascii")) + if len(content) != 2: + raise ValueError( + f"Graphie file '{image.original_filename}' " + f"missing delimiter {exercises.GRAPHIE_DELIMITER!r}" + ) + self.add_file_to_write(svg_name, content[0]) + self.add_file_to_write(json_name, content[1]) + + def _process_formulas(self, content): + return _DOUBLE_DOLLAR_RE.sub(r"$\1$", content) + + def _process_content(self, content): + content = self._process_formulas(content) + return super()._process_content(content) + + def process_assessment_item(self, assessment_item): + if assessment_item.type == exercises.PERSEUS_QUESTION: + self._write_raw_perseus_image_files(assessment_item) + return super().process_assessment_item(assessment_item) + + def _process_input_answers(self, processed_data): + """Extract input answer processing logic""" + non_empty_answers = [] + for answer in processed_data["answers"]: + answer["answer"] = extract_value(answer["answer"]) + if answer["answer"] or answer["answer"] == 0: + non_empty_answers.append(answer) + + return {**processed_data, "answers": non_empty_answers} + + def create_assessment_item(self, assessment_item, processed_data): + template = self.TEMPLATE_MAP.get(assessment_item.type) + if not template: + raise TypeError( + f"Unrecognized question type on item {assessment_item.assessment_id}: {assessment_item.type}" + ) + + # Handle input question special case + if assessment_item.type == exercises.INPUT_QUESTION: + processed_data = self._process_input_answers(processed_data) + + filename = f"{assessment_item.assessment_id}.json" + content = render_to_string(template, processed_data).encode("utf-8", "ignore") + return filename, content + + def get_image_file_path(self): + return "images" + + def get_image_ref_prefix(self): + return f"${exercises.IMG_PLACEHOLDER}/images" + + def handle_before_assessment_items(self): + exercise_context = { + "exercise": json.dumps(self.exercise_data, sort_keys=True, indent=4) + } + exercise_result = render_to_string( + "perseus/exercise.json", exercise_context + ).encode("utf-8") + self.add_file_to_write("exercise.json", exercise_result) diff --git a/contentcuration/contentcuration/utils/assessment/qti/__init__.py b/contentcuration/contentcuration/utils/assessment/qti/__init__.py new file mode 100644 index 0000000000..c8cb0afb95 --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/qti/__init__.py @@ -0,0 +1,6 @@ +from .base import ElementTreeBase + + +__all__ = [ + "ElementTreeBase", +] diff --git 
a/contentcuration/contentcuration/utils/assessment/qti/archive.py b/contentcuration/contentcuration/utils/assessment/qti/archive.py new file mode 100644 index 0000000000..7574192a61 --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/qti/archive.py @@ -0,0 +1,297 @@ +import base64 +from typing import Any +from typing import Dict +from typing import List +from typing import Tuple + +from le_utils.constants import exercises +from le_utils.constants import format_presets + +from contentcuration.utils.assessment.base import ExerciseArchiveGenerator +from contentcuration.utils.assessment.markdown import render_markdown +from contentcuration.utils.assessment.qti.assessment_item import AssessmentItem +from contentcuration.utils.assessment.qti.assessment_item import CorrectResponse +from contentcuration.utils.assessment.qti.assessment_item import ItemBody +from contentcuration.utils.assessment.qti.assessment_item import OutcomeDeclaration +from contentcuration.utils.assessment.qti.assessment_item import ResponseDeclaration +from contentcuration.utils.assessment.qti.assessment_item import ResponseProcessing +from contentcuration.utils.assessment.qti.assessment_item import Value +from contentcuration.utils.assessment.qti.base import ElementTreeBase +from contentcuration.utils.assessment.qti.constants import BaseType +from contentcuration.utils.assessment.qti.constants import Cardinality +from contentcuration.utils.assessment.qti.constants import Orientation +from contentcuration.utils.assessment.qti.constants import ResourceType +from contentcuration.utils.assessment.qti.constants import ShowHide +from contentcuration.utils.assessment.qti.html import Div +from contentcuration.utils.assessment.qti.html import FlowContentList +from contentcuration.utils.assessment.qti.html import P +from contentcuration.utils.assessment.qti.imsmanifest import File as ManifestFile +from contentcuration.utils.assessment.qti.imsmanifest import Manifest +from contentcuration.utils.assessment.qti.imsmanifest import Metadata +from contentcuration.utils.assessment.qti.imsmanifest import Resource +from contentcuration.utils.assessment.qti.imsmanifest import Resources +from contentcuration.utils.assessment.qti.interaction_types.simple import ( + ChoiceInteraction, +) +from contentcuration.utils.assessment.qti.interaction_types.simple import SimpleChoice +from contentcuration.utils.assessment.qti.interaction_types.text_based import ( + TextEntryInteraction, +) +from contentcuration.utils.assessment.qti.prompt import Prompt + + +choice_interactions = { + exercises.MULTIPLE_SELECTION, + exercises.SINGLE_SELECTION, + "true_false", +} +text_entry_interactions = {exercises.INPUT_QUESTION, exercises.FREE_RESPONSE} + + +def hex_to_qti_id(hex_string): + """ + Encode a 32 digit hex to a 22 character base64 encoded id and a K prefix. + """ + bytes_data = bytes.fromhex(hex_string) + return f"K{base64.urlsafe_b64encode(bytes_data).decode('ascii').rstrip('=')}" + + +class QTIExerciseGenerator(ExerciseArchiveGenerator): + """ + Exercise zip generator for QTI format exercises. + Creates IMS Content Package with QTI 3.0 assessment items. 
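+ + Package layout produced by this generator (derived from the path helpers + and manifest handling below): + + imsmanifest.xml + items/<item_id>.xml + items/images/<checksum>.<ext>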
+ """ + + file_format = "zip" + preset = format_presets.QTI_ZIP + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.qti_items = [] + + def get_image_file_path(self) -> str: + """Get the file path for QTI assessment items.""" + return "items/images" + + def get_image_ref_prefix(self): + """ + Because we put items in a subdirectory, we need to prefix the image paths + with the relative path to the images directory. + """ + return "images" + + def _create_html_content_from_text(self, text: str) -> FlowContentList: + """Convert text content to QTI HTML flow content.""" + if not text.strip(): + return [] + markup = render_markdown(text) + return ElementTreeBase.from_string(markup) + + def _create_choice_interaction_and_response( + self, processed_data: Dict[str, Any] + ) -> Tuple[ChoiceInteraction, ResponseDeclaration]: + """Create a QTI choice interaction for multiple choice questions.""" + + prompt = Prompt( + children=self._create_html_content_from_text(processed_data["question"]) + ) + + choices = [] + correct_values = [] + for i, answer in enumerate(processed_data.get("answers", [])): + choice_id = f"choice_{i}" + choice_content = self._create_html_content_from_text( + answer.get("answer", "") + ) + + choice = SimpleChoice( + identifier=choice_id, + children=choice_content, + show_hide=ShowHide.SHOW, + fixed=False, + ) + choices.append(choice) + + if answer.get("correct", False): + correct_values.append(Value(value=choice_id)) + + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.MULTIPLE + if processed_data["multiple_select"] + else Cardinality.SINGLE, + base_type=BaseType.IDENTIFIER, + correct_response=CorrectResponse(value=correct_values) + if correct_values + else None, + ) + + interaction = ChoiceInteraction( + response_identifier="RESPONSE", + prompt=prompt, + answers=choices, + shuffle=processed_data.get("randomize", False), + max_choices=len(choices) if processed_data["multiple_select"] else 1, + min_choices=0, + orientation=Orientation.VERTICAL, + ) + return interaction, response_declaration + + def _create_text_entry_interaction_and_response( + self, processed_data: Dict[str, Any] + ) -> Tuple[Div, ResponseDeclaration]: + prompt = self._create_html_content_from_text(processed_data["question"]) + interaction_element = TextEntryInteraction( + response_identifier="RESPONSE", + expected_length=50, # Default expected length + placeholder_text="Enter your answer here", + ) + # Text entry interaction is an inline element, so wrap it in a paragraph tag. + interaction_element = P(children=[interaction_element]) + # prompt is already a list of elements, so just append the interaction to it. 
+ prompt.append(interaction_element) + interaction = Div(children=prompt) + + correct_values = [] + values_float = [] + for answer in processed_data["answers"]: + if answer["correct"]: + correct_values.append(Value(value=str(answer["answer"]))) + try: + float(answer["answer"]) + values_float.append(True) + except ValueError: + values_float.append(False) + float_answer = bool(values_float) and all(values_float) + + response_declaration = ResponseDeclaration( + identifier="RESPONSE", + cardinality=Cardinality.MULTIPLE + if len(correct_values) > 1 + else Cardinality.SINGLE, + base_type=BaseType.FLOAT if float_answer else BaseType.STRING, + correct_response=CorrectResponse(value=correct_values) + if correct_values + else None, + ) + return interaction, response_declaration + + def _qti_item_filepath(self, assessment_id): + return f"items/{assessment_id}.xml" + + def create_assessment_item( + self, assessment_item, processed_data: Dict[str, Any] + ) -> tuple[str, bytes]: + """Create QTI assessment item XML.""" + + # Reject Perseus questions, as they can't be easily converted + if assessment_item.type == exercises.PERSEUS_QUESTION: + raise ValueError( + f"Perseus questions are not supported in QTI format: {assessment_item.assessment_id}" + ) + + if assessment_item.type in choice_interactions: + ( + interaction, + response_declaration, + ) = self._create_choice_interaction_and_response(processed_data) + elif assessment_item.type in text_entry_interactions: + ( + interaction, + response_declaration, + ) = self._create_text_entry_interaction_and_response(processed_data) + else: + raise ValueError(f"Unsupported question type: {assessment_item.type}") + + # Create item body with the interaction + item_body = ItemBody(children=[interaction]) + + # Create outcome declaration + outcome_declaration = OutcomeDeclaration( + identifier="SCORE", cardinality=Cardinality.SINGLE, base_type=BaseType.FLOAT + ) + + # Create response processing + response_processing = ResponseProcessing( + template="https://purl.imsglobal.org/spec/qti/v3p0/rptemplates/match_correct" + ) + + language = ( + self.ccnode.language.lang_code + if self.ccnode.language + else self.default_language + ) + + qti_item_id = hex_to_qti_id(assessment_item.assessment_id) + + # Create the assessment item + qti_item = AssessmentItem( + identifier=qti_item_id, + title=f"{self.ccnode.title} {len(self.qti_items) + 1}", + language=language, + adaptive=False, + time_dependent=False, + response_declaration=[response_declaration], + outcome_declaration=[outcome_declaration], + item_body=item_body, + response_processing=response_processing, + ) + + # Store for manifest creation + self.qti_items.append(qti_item) + + # Generate XML content + xml_content = qti_item.to_xml_string() + + # Add XML declaration and format nicely + full_xml = f'<?xml version="1.0" encoding="UTF-8"?>\n{xml_content}' + + filename = self._qti_item_filepath(qti_item_id) + return filename, full_xml.encode("utf-8") + + def _create_manifest_resources(self) -> List[Resource]: + """Create manifest resources for all QTI items.""" + resources = [] + + for qti_item in self.qti_items: + # Get file dependencies (images, etc.)
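+ # (these are gathered during serialization: XMLElement.to_element records + # src/href attribute values and srcset entries as dependencies)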
+ file_dependencies = qti_item.get_file_dependencies() + + # Create file entries + qti_item_filepath = self._qti_item_filepath(qti_item.identifier) + files = [ManifestFile(href=qti_item_filepath)] + for dep in file_dependencies: + files.append(ManifestFile(href=dep)) + + resource = Resource( + identifier=qti_item.identifier, + type_=ResourceType.ASSESSMENT_ITEM.value, + href=qti_item_filepath, + files=files, + ) + resources.append(resource) + + return resources + + def _create_imsmanifest(self) -> str: + # Create resources + resources = self._create_manifest_resources() + + # Create manifest + manifest = Manifest( + identifier=hex_to_qti_id(self.ccnode.content_id), + version="1.0", + metadata=Metadata(schema="QTI Package", schemaversion="3.0.0"), + resources=Resources(resources=resources), + ) + + xml_content = manifest.to_xml_string() + return f'<?xml version="1.0" encoding="UTF-8"?>\n{xml_content}' + + def handle_after_assessment_items(self): + # Create and write the IMS manifest + manifest_xml = self._create_imsmanifest() + self.add_file_to_write("imsmanifest.xml", manifest_xml.encode("utf-8")) + # Sort all paths to parallel the predictable zip generation logic in ricecooker + # and the Kolibri Studio frontend. + self.files_to_write = sorted(self.files_to_write) diff --git a/contentcuration/contentcuration/utils/assessment/qti/assessment_item.py b/contentcuration/contentcuration/utils/assessment/qti/assessment_item.py new file mode 100644 index 0000000000..830044ae79 --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/qti/assessment_item.py @@ -0,0 +1,237 @@ +from typing import Annotated +from typing import List +from typing import Optional +from typing import Union + +from annotated_types import Len +from pydantic import AnyUrl +from pydantic import Field +from pydantic import model_validator +from pydantic import PositiveInt + +from contentcuration.utils.assessment.qti.base import BaseSequence +from contentcuration.utils.assessment.qti.base import QTIBase +from contentcuration.utils.assessment.qti.base import TextType +from contentcuration.utils.assessment.qti.constants import BaseType +from contentcuration.utils.assessment.qti.constants import Cardinality +from contentcuration.utils.assessment.qti.constants import ExternalScored +from contentcuration.utils.assessment.qti.constants import View +from contentcuration.utils.assessment.qti.fields import BCP47Language +from contentcuration.utils.assessment.qti.fields import LocalHrefPath +from contentcuration.utils.assessment.qti.fields import QTIIdentifier +from contentcuration.utils.assessment.qti.html import BlockContentElement +from contentcuration.utils.assessment.qti.interaction_types.base import BlockInteraction + + +class Value(QTIBase): + """ + Represents a single value within a default value, correct response, + or other value container. + + For record values, both the field-identifier and base-type attributes + are required to identify which field of the record this value belongs to + and what type that field is. + + For non-record values (single, multiple, ordered cardinality), these + attributes are optional and typically not needed as the base-type is + determined by the parent variable declaration.
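+ + Example (illustrative values): + + Value(value="choice_0") # single/multiple cardinality + Value(value="0.5", field_identifier="weight", base_type=BaseType.FLOAT) # record field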
+ """ + + value: TextType # The actual value content + field_identifier: Optional[QTIIdentifier] = None # Required only for record values + base_type: Optional[BaseType] = None # Required only for record values + + +ValueType = Annotated[List[Value], Len(min_length=1)] + + +class CorrectResponse(QTIBase): + """Defines the correct response for the interaction.""" + + value: ValueType = Field(default_factory=list) + + +class DefaultValue(QTIBase): + """ + Defines the default value for a variable. Contains one or more + value elements depending on the cardinality of the variable. + """ + + value: ValueType = Field(default_factory=list) + # Human readable interpretation of the default value + interpretation: Optional[str] = None + + +def _validate_value(self, attribute_name="default_value"): + attr_value = getattr(self, attribute_name) + if attr_value is not None: + if self.cardinality == Cardinality.SINGLE: + # Single cardinality should have exactly one value + if len(attr_value.value) > 1: + raise ValueError( + f"Single cardinality cannot have multiple {attribute_name.replace('_', ' ')}s" + ) + elif self.cardinality == Cardinality.RECORD: + # Record cardinality requires field identifiers + for value in attr_value.value: + if not value.field_identifier: + raise ValueError( + f"Record cardinality requires field_identifier in {attribute_name.replace('_', ' ')}" + ) + if not value.base_type: + raise ValueError( + f"Record cardinality requires base_type in {attribute_name.replace('_', ' ')}" + ) + + +class OutcomeDeclaration(QTIBase): + """ + QTI outcome declaration defines an outcome variable, which represents the + result of response processing. Outcomes are typically scores but can also + be other results such as feedback identifiers or completion status. + """ + + identifier: QTIIdentifier + cardinality: Cardinality = Cardinality.SINGLE + base_type: Optional[BaseType] = None + view: Optional[View] = None + interpretation: Optional[AnyUrl] = None + long_interpretation: Optional[str] = None + normal_maximum: Optional[PositiveInt] = None + normal_minimum: Optional[float] = None + mastery_value: Optional[float] = None + external_scored: Optional[ExternalScored] = None + variable_identifier_ref: Optional[str] = None + default_value: Optional[DefaultValue] = None + + @model_validator(mode="after") + def validate_cardinality_compatibility(self): + _validate_value(self) + return self + + +class ItemBody(QTIBase, BaseSequence): + """Contains the content of the assessment item""" + + children: List[Union[BlockInteraction, BlockContentElement]] = Field( + default_factory=list + ) + + +class ContextDeclaration(QTIBase): + """ + QTI context declaration defines a 'contextual' variable with global scope to + an assessment item. Context variables provide contextual information to + template processing and response processing, such as candidate information, + test information, and environment information. 
+ """ + + identifier: QTIIdentifier + cardinality: Cardinality + base_type: Optional[BaseType] = None + default_value: Optional[DefaultValue] = None + + @model_validator(mode="after") + def validate_cardinality_compatibility(self): + _validate_value(self) + return self + + +class MapEntry(QTIBase): + """Entry in a mapping that maps a specific value to a score""" + + # Key (usually an identifier) + map_key: str + # Value to map + mapped_value: float + # Whether string comparison is case sensitive + case_sensitive: bool = False + + +class Mapping(QTIBase): + """Maps response values to scores for partial credit scoring""" + + map_entries: List[MapEntry] = Field(default_factory=list) + # Score for responses not explicitly mapped + default_value: float = 0.0 + # Lower bound for mapping results + lower_bound: Optional[float] = None + # Upper bound for mapping results + upper_bound: Optional[float] = None + + +class AreaMapEntry(QTIBase): + """Entry in an area mapping that maps a specific area to a score""" + + # Shape of the area (rect, circle, poly, default) + shape: str + # Coordinates defining the area + coords: str + # Score for responses in this area + mapped_value: float + + +class AreaMapping(QTIBase): + """Maps areas to scores for graphical interactions""" + + area_map_entries: List[AreaMapEntry] = Field(default_factory=list) + # Score for responses not in any defined area + default_value: float = 0.0 + # Lower bound for mapping results + lower_bound: Optional[float] = None + # Upper bound for mapping results + upper_bound: Optional[float] = None + + +class ResponseDeclaration(QTIBase): + """ + QTI response declaration defines a response variable and optionally its + correct response value and/or mapping. Response variables capture candidate + interactions with the assessment item's interactions and are used in response + processing to determine outcomes. 
+ """ + + identifier: QTIIdentifier + cardinality: Cardinality + base_type: BaseType + correct_response: Optional[CorrectResponse] = None + mapping: Optional[Mapping] = None + area_mapping: Optional[AreaMapping] = None + + @model_validator(mode="after") + def validate_cardinality_compatibility(self): + _validate_value(self, "correct_response") + return self + + +class ResponseProcessing(QTIBase): + """Represents response processing rules or template reference""" + + # URI reference to a response processing template + template: Optional[AnyUrl] = None + # Optional URL that resolves to the template - we additionally enforce that this be local + # although this is not required by the QTI spec + template_location: Optional[LocalHrefPath] = None + # rules deliberately not implemented yet + + +class AssessmentItem(QTIBase): + """Represents a QTI assessment item""" + + xmlns: str = "http://www.imsglobal.org/xsd/imsqtiasi_v3p0" + xmlns__xsi: str = "http://www.w3.org/2001/XMLSchema-instance" + xsi__schemaLocation: str = "http://www.imsglobal.org/xsd/imsqtiasi_v3p0 https://purl.imsglobal.org/spec/qti/v3p0/schema/xsd/imsqti_asiv3p0p1_v1p0.xsd" + identifier: QTIIdentifier + title: str + label: Optional[str] = None + adaptive: bool = False + time_dependent: Optional[bool] = None + language: BCP47Language + tool_name: str = "kolibri" + tool_version: str = "0.1" + + context_declaration: List[ContextDeclaration] = Field(default_factory=list) + response_declaration: List[ResponseDeclaration] = Field(default_factory=list) + outcome_declaration: List[OutcomeDeclaration] = Field(default_factory=list) + item_body: Optional[ItemBody] = None + response_processing: Optional[ResponseProcessing] = None diff --git a/contentcuration/contentcuration/utils/assessment/qti/base.py b/contentcuration/contentcuration/utils/assessment/qti/base.py new file mode 100644 index 0000000000..5467654a6b --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/qti/base.py @@ -0,0 +1,274 @@ +import re +import xml.etree.ElementTree as ET +from abc import ABC +from enum import Enum +from functools import partial +from typing import Annotated +from typing import List +from typing import Optional +from typing import Set +from typing import Type +from typing import Union + +from pydantic import BaseModel +from pydantic import BeforeValidator +from pydantic import ConfigDict +from pydantic import PrivateAttr + +from .constants import Dir +from .fields import entry_pattern as srcset_entry_pattern +from contentcuration.utils.assessment.qti.fields import BCP47Language + + +class TextNode(BaseModel): + """Class to represent text nodes within XML elements""" + + text: str + + +class XMLElement(BaseModel, ABC): + """Base class for XML elements""" + + # Pydantic configuration + model_config = ConfigDict( + # Prevent extra fields + extra="forbid", + validate_assignment=True, + # Prevent mutations to ensure immutability + frozen=True, + ) + + # Private attributes (not included in Pydantic fields) + _file_dependencies: Set[str] = PrivateAttr(default_factory=set) + _element: ET.Element = PrivateAttr(default=None) + + @classmethod + def element_name(cls): + return cls.__name__.lower() + + def to_element(self) -> ET.Element: # noqa: C901 + if self._element: + return self._element + + element = ET.Element(self.element_name()) + + self._file_dependencies = set() + + # Add attributes based on pydantic fields + for field_name in self.__class__.model_fields: + + value = getattr(self, field_name) + + # Skip None values + if value is None: + 
+                continue
+
+            if isinstance(value, (XMLElement, TextNode)):
+                value = [value]
+
+            if isinstance(value, list):
+                if all(isinstance(item, (XMLElement, TextNode)) for item in value):
+                    for item in value:
+                        if isinstance(item, XMLElement):
+                            child_elements = item.to_element()
+                            if not isinstance(child_elements, list):
+                                child_elements = [child_elements]
+                            for child_element in child_elements:
+                                element.append(child_element)
+                            self._file_dependencies |= item._file_dependencies
+                        else:
+                            # Text nodes become the text of the parent element,
+                            # or the tail of the most recent child element.
+                            current_children = list(element)
+                            if current_children:
+                                current_children[-1].tail = (
+                                    current_children[-1].tail or ""
+                                ) + item.text
+                            else:
+                                element.text = (element.text or "") + item.text
+
+                    continue
+                raise ValueError(
+                    "List types should only contain XMLElement or TextNodes"
+                )
+
+            elif isinstance(value, bool):
+                value = str(value).lower()
+
+            elif isinstance(value, Enum):
+                # Handle enum values
+                value = value.value
+
+            # Some attribute names are reserved Python keywords or builtins;
+            # to allow these, we permit a trailing underscore, which we strip here.
+            # All attributes use kebab-case, which we can't easily use as field names,
+            # so we encode them as snake_case and convert to kebab-case here.
+            # Some attributes also include : which we encode as a double underscore.
+            attr_name = field_name.rstrip("_").replace("__", ":").replace("_", "-")
+
+            # Set the attribute
+            element.set(attr_name, str(value))
+
+            if attr_name == "src" or attr_name == "href":
+                self._file_dependencies.add(value)
+            elif attr_name == "srcset":
+                entries = re.findall(srcset_entry_pattern, value)
+                for entry in entries:
+                    # Each entry is a tuple of (url, descriptors)
+                    url = entry[0].strip()
+                    self._file_dependencies.add(url)
+
+        self._element = element
+
+        return self._element
+
+    def to_xml_string(self) -> str:
+        """Convert to XML string"""
+        element = self.to_element()
+        return ET.tostring(element, encoding="unicode")
+
+    def get_file_dependencies(self) -> List[str]:
+        # Ensure the element has been processed so that the file dependencies are collected.
+        self.to_element()
+        return list(self._file_dependencies)
+
+
+class QTIBase(XMLElement):
+    """
+    A base class to allow us to conventionally generate element names from class names for QTI elements.
+    """
+
+    @classmethod
+    def element_name(cls):
+        # Convert PascalCase to kebab-case
+        name = re.sub(r"(?<=[a-z])(?=[A-Z])", "-", cls.__name__)
+        return f"qti-{name.lower()}"
+
+
+def coerce_str_to_model(element_type, value: Union[str, XMLElement]) -> XMLElement:
+    """Convert string to element_type if needed"""
+    if isinstance(value, str):
+        return element_type(text=value)
+    return value
+
+
+def generate_coerced_string_type(element_type):
+    return Annotated[
+        element_type, BeforeValidator(partial(coerce_str_to_model, element_type))
+    ]
+
+
+TextType = generate_coerced_string_type(TextNode)
+
+
+class BaseSequence(XMLElement):
+    id_: Optional[str] = None
+    class_: Optional[str] = None
+    lang: Optional[BCP47Language] = None
+    # We explicitly do not set the deprecated language value.
+    label: Optional[str] = None
+    # We explicitly do not set the base value.
+    dir_: Optional[Dir] = None
+
+
+# Pydantic's BaseModel metaclass is only importable from an internal module,
+# so we inspect the BaseSequence class to get its metaclass.
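+# Deriving our registry metaclass from it (rather than from plain `type`)
+# avoids a metaclass conflict and keeps element classes fully compatible
+# with Pydantic's model construction.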
+BaseSequenceMetaclass = type(BaseSequence)
+
+
+class RegistryMeta(BaseSequenceMetaclass):
+    """Generic metaclass that creates separate registries for each subclass"""
+
+    def __new__(mcs, name, bases, attrs):
+        cls = super().__new__(mcs, name, bases, attrs)
+
+        # Each metaclass gets its own registry
+        if not hasattr(mcs, "_registry"):
+            mcs._registry = {}
+
+        element_name = cls.element_name()
+        if element_name in mcs._registry and mcs._registry[element_name] is not cls:
+            raise ValueError(
+                f"Element name '{element_name}' already registered in {mcs.__name__}"
+            )
+        mcs._registry[element_name] = cls
+
+        return cls
+
+    @classmethod
+    def _ensure_registry_complete(cls):
+        """Ensure all HTML and MathML classes are registered"""
+        if not hasattr(cls, "_registry_initialized"):
+            # Import modules to trigger registration
+            from contentcuration.utils.assessment.qti import html, mathml  # noqa: F401
+
+            cls._registry_initialized = True
+
+    @classmethod
+    def get_class_for_tag(cls, tag_name: str) -> Optional[Type]:
+        """Get the registered class for a given tag name"""
+        cls._ensure_registry_complete()
+        return getattr(cls, "_registry", {}).get(tag_name)
+
+
+class ElementTreeBase(BaseSequence, metaclass=RegistryMeta):
+    @classmethod
+    def from_element(cls, element: ET.Element) -> "ElementTreeBase":
+        # Get the appropriate class for this tag
+        target_class = type(cls).get_class_for_tag(element.tag)
+        if target_class is None:
+            raise ValueError(f"No registered class found for tag: {element.tag}")
+
+        # Convert attributes to field data - Pydantic will handle type coercion
+        field_data = {}
+        for attr_name, attr_value in element.attrib.items():
+            field_name = cls._attr_name_to_field_name(attr_name)
+            field_data[field_name] = attr_value
+
+        # Convert children and text
+        children = cls._extract_children(element)
+        if children:
+            field_data["children"] = children
+
+        return target_class(**field_data)
+
+    @classmethod
+    def _attr_name_to_field_name(cls, attr_name: str) -> str:
+        """Convert attribute name to Python field name"""
+        # kebab-case -> snake_case, : -> __
+        field_name = attr_name.replace(":", "__").replace("-", "_")
+
+        # Add trailing underscore for Python keywords
+        if field_name in {"class", "for", "type", "id", "dir"}:
+            field_name += "_"
+
+        return field_name
+
+    @classmethod
+    def _extract_children(
+        cls, element: ET.Element
+    ) -> List[Union["ElementTreeBase", TextNode]]:
+        """Extract child elements and text nodes from XML element"""
+        children = []
+
+        # Add initial text if present
+        if element.text and element.text.strip():
+            children.append(TextNode(text=element.text))
+
+        # Process child elements
+        for child_elem in element:
+            children.append(cls.from_element(child_elem))
+            # Add tail text after child element
+            if child_elem.tail and child_elem.tail.strip():
+                children.append(TextNode(text=child_elem.tail))
+
+        return children
+
+    @classmethod
+    def from_string(cls, string: str) -> List["ElementTreeBase"]:
+        """Parse markup string and return list of ElementTreeBase instances"""
+        try:
+            # Wrap in a root element to handle multiple top-level elements
+            wrapped_markup = f"<root>{string}</root>"
+            root = ET.fromstring(wrapped_markup)
+            return [cls.from_element(child) for child in root]
+        except ET.ParseError as e:
+            raise ValueError(f"Invalid Markup: {e}") from e
diff --git a/contentcuration/contentcuration/utils/assessment/qti/constants.py b/contentcuration/contentcuration/utils/assessment/qti/constants.py
new file mode 100644
index 0000000000..99ea507af3
--- /dev/null
+++ b/contentcuration/contentcuration/utils/assessment/qti/constants.py
@@ -0,0 +1,69 @@
+from enum import Enum
+
+
+# QTI Constants for Outcome Declarations
+
+
+class Cardinality(Enum):
+    MULTIPLE = "multiple"
+    SINGLE = "single"
+    ORDERED = "ordered"
+    RECORD = "record"
+
+
+class BaseType(Enum):
+    BOOLEAN = "boolean"
+    DIRECTED_PAIR = "directedPair"
+    DURATION = "duration"
+    FILE = "file"
+    FLOAT = "float"
+    IDENTIFIER = "identifier"
+    INTEGER = "integer"
+    PAIR = "pair"
+    POINT = "point"
+    STRING = "string"
+    URI = "uri"
+
+
+class View(Enum):
+    AUTHOR = "author"
+    CANDIDATE = "candidate"
+    PROCTOR = "proctor"
+    SCORER = "scorer"
+    TEST_CONSTRUCTOR = "testConstructor"
+    TUTOR = "tutor"
+
+
+class ExternalScored(Enum):
+    EXTERNAL_MACHINE = "externalMachine"
+    HUMAN = "human"
+
+
+class ShowHide(Enum):
+    SHOW = "show"
+    HIDE = "hide"
+
+
+class Dir(Enum):
+    LTR = "ltr"
+    RTL = "rtl"
+    AUTO = "auto"
+
+
+class Format(Enum):
+    PLAIN = "plain"
+    PREFORMATTED = "preformatted"
+    XHTML = "xhtml"
+
+
+class Orientation(Enum):
+    HORIZONTAL = "horizontal"
+    VERTICAL = "vertical"
+
+
+class ResourceType(Enum):
+    """Enumeration for QTI resource types"""
+
+    ASSESSMENT_TEST = "imsqti_test_xmlv3p0"
+    ASSESSMENT_ITEM = "imsqti_item_xmlv3p0"
+    RESPONSE_TEMPLATE = "imsqti_rptemplate_xmlv3p0"
diff --git a/contentcuration/contentcuration/utils/assessment/qti/fields.py b/contentcuration/contentcuration/utils/assessment/qti/fields.py
new file mode 100644
index 0000000000..f90b6d30e8
--- /dev/null
+++ b/contentcuration/contentcuration/utils/assessment/qti/fields.py
@@ -0,0 +1,118 @@
+import re
+from typing import Annotated
+from urllib.parse import urlparse
+
+from langcodes import Language as LangCodesLanguage
+from pydantic import BeforeValidator
+from pydantic import Field
+
+
+def validate_bcp47_language(value: str) -> str:
+    """Validate and normalize BCP47 language tag."""
+    if not isinstance(value, str):
+        raise ValueError(f"BCP47 language tag must be a string, got {type(value)}")
+
+    if not value:
+        raise ValueError("BCP47 language tag cannot be empty")
+
+    try:
+        # Validate and normalize using langcodes
+        return LangCodesLanguage.get(value).to_tag()
+    except ValueError as e:
+        raise ValueError("Invalid BCP47 language tag") from e
+
+
+BCP47Language = Annotated[str, BeforeValidator(validate_bcp47_language)]
+
+data_uri_pattern = r"data:(?:([-\w]+/[-+\w.]+)(?:(;[-\w]+=[-\w]+)*))?(;base64)?,(.*)"
+
+data_uri_regex = re.compile(rf"^{data_uri_pattern}$")
+
+
+def validate_data_uri(value: str) -> str:
+    """
+    Validate data URI format according to RFC 2397.
+    Format: data:[<mediatype>][;base64],<data>
+    """
+
+    match = data_uri_regex.match(value)
+    if not match:
+        raise ValueError(f"Invalid data URI format: {value}")
+
+    return value
+
+
+def validate_local_href_path(value: str) -> str:
+    """
+    Validate that a path is relative (no scheme) and suitable for offline bundling.
+    Allows: relative/path.jpg, ../path.jpg, ./file.png, #fragment, data:...
+    Rejects: http://..., https://..., ftp://..., etc.
+    """
+    parsed = urlparse(value)
+    # Allow data URLs (for embedded content)
+    if parsed.scheme == "data":
+        return validate_data_uri(value)
+
+    # Reject absolute URLs
+    if parsed.scheme or parsed.netloc or parsed.path.startswith("/"):
+        raise ValueError(f"Absolute URLs not allowed in bundled content: {value}")
+
+    return value
+
+
+def validate_local_src_path(value: str) -> str:
+    """
+    Validate local src paths - stricter than href, should be actual file paths.
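+
+    Illustrative behaviour (the paths shown are hypothetical):
+
+        validate_local_src_path("images/figure1.png")  # returned unchanged
+        validate_local_src_path("#fragment")  # raises ValueError: no file path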
+ """ + value = validate_local_href_path(value) + + parsed = urlparse(value) + if not parsed.path: + raise ValueError(f"Invalid local src path: {value}") + + # Allow relative paths + return value + + +# Regex pattern for complete srcset validation +# Matches: (data URI OR regular path) + one or more descriptors (2x, 100w, etc.) +# Separated by commas with optional whitespace +entry_pattern = rf"({data_uri_pattern}|[^\s,]+)(?:\s+\d*\.?\d+[xwh])+" +# Pattern for complete srcset: one or more entries separated by commas +srcset_pattern = rf"^{entry_pattern}(?:\s*,\s*{entry_pattern})*$" + + +def validate_local_srcset(value: str) -> str: + if not value.strip(): + return value + + if not re.match(srcset_pattern, value.strip()): + raise ValueError(f"Invalid srcset format: {value}") + + entries = re.findall(entry_pattern, value) + + for entry in entries: + url = entry[0] + # Only need to validate the URL - descriptors already confirmed valid + validate_local_src_path(url.strip()) + + return value + + +# Custom types for HTML attributes +LocalHrefPath = Annotated[str, BeforeValidator(validate_local_href_path)] +LocalSrcPath = Annotated[str, BeforeValidator(validate_local_src_path)] +LocalSrcSet = Annotated[str, BeforeValidator(validate_local_srcset)] + + +QTIIdentifier = Annotated[ + str, + Field( + pattern=r"^[a-zA-Z_][a-zA-Z0-9_\-]{0,31}$", + min_length=1, + max_length=32, + description="QTI XML identifier: must start with letter or underscore, " + "contain only letters, digits, underscores, and hyphens, " + "no colons, max 32 characters", + ), +] diff --git a/contentcuration/contentcuration/utils/assessment/qti/html/__init__.py b/contentcuration/contentcuration/utils/assessment/qti/html/__init__.py new file mode 100644 index 0000000000..f28fea09f0 --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/qti/html/__init__.py @@ -0,0 +1,188 @@ +# __init__.py +from contentcuration.utils.assessment.qti.html.base import BlockContentElement +from contentcuration.utils.assessment.qti.html.base import FlowContentElement +from contentcuration.utils.assessment.qti.html.base import HTMLElement +from contentcuration.utils.assessment.qti.html.base import Source +from contentcuration.utils.assessment.qti.html.breaks import Br +from contentcuration.utils.assessment.qti.html.breaks import Hr +from contentcuration.utils.assessment.qti.html.content_types import FlowContent +from contentcuration.utils.assessment.qti.html.content_types import FlowContentList +from contentcuration.utils.assessment.qti.html.content_types import InlineContent +from contentcuration.utils.assessment.qti.html.content_types import InlineContentList +from contentcuration.utils.assessment.qti.html.content_types import InlineGroup +from contentcuration.utils.assessment.qti.html.content_types import InlineGroupList +from contentcuration.utils.assessment.qti.html.display import Details +from contentcuration.utils.assessment.qti.html.display import Figcaption +from contentcuration.utils.assessment.qti.html.display import Figure +from contentcuration.utils.assessment.qti.html.display import Label +from contentcuration.utils.assessment.qti.html.display import Summary +from contentcuration.utils.assessment.qti.html.embed import Img +from contentcuration.utils.assessment.qti.html.embed import Object +from contentcuration.utils.assessment.qti.html.embed import Param +from contentcuration.utils.assessment.qti.html.embed import Picture +from contentcuration.utils.assessment.qti.html.flow import Address +from 
contentcuration.utils.assessment.qti.html.flow import Article +from contentcuration.utils.assessment.qti.html.flow import Aside +from contentcuration.utils.assessment.qti.html.flow import Blockquote +from contentcuration.utils.assessment.qti.html.flow import Div +from contentcuration.utils.assessment.qti.html.flow import Footer +from contentcuration.utils.assessment.qti.html.flow import Header +from contentcuration.utils.assessment.qti.html.flow import Nav +from contentcuration.utils.assessment.qti.html.flow import Section +from contentcuration.utils.assessment.qti.html.media import Audio +from contentcuration.utils.assessment.qti.html.media import Preload +from contentcuration.utils.assessment.qti.html.media import Track +from contentcuration.utils.assessment.qti.html.media import TrackKind +from contentcuration.utils.assessment.qti.html.media import Video +from contentcuration.utils.assessment.qti.html.sequence import Dd +from contentcuration.utils.assessment.qti.html.sequence import Dl +from contentcuration.utils.assessment.qti.html.sequence import Dt +from contentcuration.utils.assessment.qti.html.sequence import Li +from contentcuration.utils.assessment.qti.html.sequence import Ol +from contentcuration.utils.assessment.qti.html.sequence import OlType +from contentcuration.utils.assessment.qti.html.sequence import Ul +from contentcuration.utils.assessment.qti.html.table import Caption +from contentcuration.utils.assessment.qti.html.table import Col +from contentcuration.utils.assessment.qti.html.table import Colgroup +from contentcuration.utils.assessment.qti.html.table import Table +from contentcuration.utils.assessment.qti.html.table import TBody +from contentcuration.utils.assessment.qti.html.table import Td +from contentcuration.utils.assessment.qti.html.table import TFoot +from contentcuration.utils.assessment.qti.html.table import Th +from contentcuration.utils.assessment.qti.html.table import THead +from contentcuration.utils.assessment.qti.html.table import ThScope +from contentcuration.utils.assessment.qti.html.table import Tr +from contentcuration.utils.assessment.qti.html.table import TrList +from contentcuration.utils.assessment.qti.html.text import A +from contentcuration.utils.assessment.qti.html.text import Abbr +from contentcuration.utils.assessment.qti.html.text import B +from contentcuration.utils.assessment.qti.html.text import Bdi +from contentcuration.utils.assessment.qti.html.text import Bdo +from contentcuration.utils.assessment.qti.html.text import BdoDir +from contentcuration.utils.assessment.qti.html.text import BlockHTMLText +from contentcuration.utils.assessment.qti.html.text import Cite +from contentcuration.utils.assessment.qti.html.text import Code +from contentcuration.utils.assessment.qti.html.text import Dfn +from contentcuration.utils.assessment.qti.html.text import Em +from contentcuration.utils.assessment.qti.html.text import H1 +from contentcuration.utils.assessment.qti.html.text import H2 +from contentcuration.utils.assessment.qti.html.text import H3 +from contentcuration.utils.assessment.qti.html.text import H4 +from contentcuration.utils.assessment.qti.html.text import H5 +from contentcuration.utils.assessment.qti.html.text import H6 +from contentcuration.utils.assessment.qti.html.text import I +from contentcuration.utils.assessment.qti.html.text import InlineHTMLText +from contentcuration.utils.assessment.qti.html.text import Kbd +from contentcuration.utils.assessment.qti.html.text import P +from contentcuration.utils.assessment.qti.html.text 
import Pre +from contentcuration.utils.assessment.qti.html.text import Q +from contentcuration.utils.assessment.qti.html.text import Rp +from contentcuration.utils.assessment.qti.html.text import Rt +from contentcuration.utils.assessment.qti.html.text import Ruby +from contentcuration.utils.assessment.qti.html.text import Samp +from contentcuration.utils.assessment.qti.html.text import Small +from contentcuration.utils.assessment.qti.html.text import Span +from contentcuration.utils.assessment.qti.html.text import Strong +from contentcuration.utils.assessment.qti.html.text import Sub +from contentcuration.utils.assessment.qti.html.text import Sup +from contentcuration.utils.assessment.qti.html.text import Var + +__all__ = [ + # Base classes + "HTMLElement", + "FlowContentElement", + "BlockContentElement", + "InlineHTMLText", + "BlockHTMLText", + # Content type aliases + "FlowContent", + "FlowContentList", + "InlineContent", + "InlineContentList", + "InlineGroup", + "InlineGroupList", + # Breaks + "Br", + "Hr", + # Display elements + "Details", + "Figcaption", + "Figure", + "Label", + "Summary", + # Embedded content + "Img", + "Object", + "Param", + "Picture", + "Source", + # Flow/sectioning content + "Address", + "Article", + "Aside", + "Blockquote", + "Div", + "Footer", + "Header", + "Nav", + "Section", + # Media elements and enums + "Audio", + "Preload", + "Track", + "TrackKind", + "Video", + # Lists and sequences + "Dd", + "Dl", + "Dt", + "Li", + "Ol", + "OlType", + "Ul", + # Tables and related types + "Caption", + "Col", + "Colgroup", + "Table", + "TBody", + "Td", + "TFoot", + "Th", + "THead", + "ThScope", + "Tr", + "TrList", + # Text content + "A", + "Abbr", + "B", + "Bdi", + "Bdo", + "BdoDir", + "Cite", + "Code", + "Dfn", + "Em", + "H1", + "H2", + "H3", + "H4", + "H5", + "H6", + "I", + "Kbd", + "P", + "Pre", + "Q", + "Rp", + "Rt", + "Ruby", + "Samp", + "Small", + "Span", + "Strong", + "Sub", + "Sup", + "Var", +] diff --git a/contentcuration/contentcuration/utils/assessment/qti/html/base.py b/contentcuration/contentcuration/utils/assessment/qti/html/base.py new file mode 100644 index 0000000000..79dba7cebf --- /dev/null +++ b/contentcuration/contentcuration/utils/assessment/qti/html/base.py @@ -0,0 +1,56 @@ +from typing import List +from typing import Optional + +from pydantic import model_validator + +from contentcuration.utils.assessment.qti.base import ElementTreeBase +from contentcuration.utils.assessment.qti.fields import LocalSrcPath +from contentcuration.utils.assessment.qti.fields import LocalSrcSet + + +class HTMLElement(ElementTreeBase): + """ + Represents an HTML element within QTI. + """ + + @classmethod + def from_html_string(cls, html_string: str) -> List["HTMLElement"]: + """Parse HTML string and return list of HTMLElement instances""" + return cls.from_string(html_string) + + +class FlowContentElement(HTMLElement): + pass + + +class InlineContentElement(FlowContentElement): + pass + + +class BlockContentElement(FlowContentElement): + pass + + +class Source(HTMLElement): + # These attributes are common to all elements in HTML5 + media: Optional[str] = None + type: Optional[str] = None + + # Required if a child of