diff --git a/.github/workflows/lint-and-build.yml b/.github/workflows/lint-and-build.yml index 6623ff54..5d315ef1 100644 --- a/.github/workflows/lint-and-build.yml +++ b/.github/workflows/lint-and-build.yml @@ -65,7 +65,7 @@ jobs: python-version: "3.11" - run: pip install add-trailing-comma - name: Analysing the code with add-trailing-comma - run: add-trailing-comma $(git ls-files '**.py*') --py36-plus + run: add-trailing-comma $(git ls-files '**.py*') Pyright: runs-on: windows-latest strategy: diff --git a/.gitignore b/.gitignore index 40e3ff25..0a6ae97f 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ __pycache__/ # Distribution / packaging +.venv/ env/ build/ dist/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6aed4fc9..3b2c0b53 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,9 +2,8 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - # Workaround for https://github.com/adamchainz/pre-commit-dprint/issues/4 - id: pretty-format-json - exclude: ".vscode/.*|dprint.json" # Exclude jsonc + exclude: ".vscode/.*" # Exclude jsonc args: [--autofix, --no-sort-keys] - id: trailing-whitespace args: [--markdown-linebreak-ext=md] @@ -18,7 +17,7 @@ repos: - id: pretty-format-ini args: [--autofix] - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: "v0.0.269" # Must match requirements-dev.txt + rev: "v0.0.276" # Must match requirements-dev.txt hooks: - id: ruff args: [--fix] @@ -27,11 +26,12 @@ repos: hooks: - id: autopep8 - repo: https://github.com/asottile/add-trailing-comma - rev: v2.4.0 # Must match requirements-dev.txt + rev: v3.0.0 # Must match requirements-dev.txt hooks: - id: add-trailing-comma ci: + autoupdate_branch: dev skip: # Ignore until Linux support. 
We don't want lf everywhere yet # And crlf fails on CI because pre-commit runs on linux diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 4ecf5a13..790ee33f 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,7 +1,6 @@ // Keep in alphabetical order { "recommendations": [ - "bungcip.better-toml", "davidanson.vscode-markdownlint", "eamodio.gitlens", "emeraldwalk.runonsave", @@ -13,11 +12,14 @@ "pkief.material-icon-theme", "redhat.vscode-xml", "redhat.vscode-yaml", + "tamasfe.even-better-toml", ], "unwantedRecommendations": [ // Must disable in this workspace // // https://github.com/microsoft/vscode/issues/40239 // // + // even-better-toml has format on save + "bungcip.better-toml", // VSCode has implemented an optimized version "coenraads.bracket-pair-colorizer", "coenraads.bracket-pair-colorizer-2", diff --git a/.vscode/settings.json b/.vscode/settings.json index 8405b12c..3abc62f7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -29,7 +29,7 @@ "commands": [ { "match": "\\.pyi?", - "cmd": "add-trailing-comma ${file} --py36-plus" + "cmd": "add-trailing-comma ${file}" }, ] }, @@ -45,9 +45,14 @@ "**/CVS": true, "**/.DS_Store": true, "**/Thumbs.db": true, - "build": true, - "**/.mypy_cache": true, + "**/.*_cache": true, // mypy and Ruff cache "**/__pycache__": true, + // Only show useful PyInstaller logs + "build/*.*": true, + "build/[b-z]*": true, + "build/**/localpycs": true, + "build/**/Tree-*": true, + "build/**/*.{manifest,pkg,zip,tcl,res,pyz}": true, }, "search.exclude": { "**/*.code-search": true, @@ -69,6 +74,12 @@ 120, // Our hard rule ], }, + "mypy-type-checker.importStrategy": "fromEnvironment", + "mypy-type-checker.args": [ + // https://github.com/microsoft/vscode-mypy/issues/37#issuecomment-1602702174 + "--config-file=mypy.ini", + ], + "python.terminal.activateEnvironment": true, // Important to follow the config in pyrightconfig.json "python.analysis.useLibraryCodeForTypes": false, 
"python.analysis.diagnosticMode": "workspace", @@ -109,5 +120,25 @@ "xd:pre", // Custom "string" - ] + ], + "[toml]": { + "editor.defaultFormatter": "tamasfe.even-better-toml" + }, + "evenBetterToml.formatter.alignComments": false, + "evenBetterToml.formatter.alignEntries": false, + "evenBetterToml.formatter.allowedBlankLines": 1, + "evenBetterToml.formatter.arrayAutoCollapse": true, + "evenBetterToml.formatter.arrayAutoExpand": true, + "evenBetterToml.formatter.arrayTrailingComma": true, + "evenBetterToml.formatter.columnWidth": 80, + "evenBetterToml.formatter.compactArrays": true, + "evenBetterToml.formatter.compactEntries": false, + "evenBetterToml.formatter.compactInlineTables": false, + "evenBetterToml.formatter.indentEntries": false, + "evenBetterToml.formatter.indentTables": false, + "evenBetterToml.formatter.inlineTableExpand": false, + "evenBetterToml.formatter.reorderArrays": true, + "evenBetterToml.formatter.trailingNewline": true, + // We like keeping TOML keys in a certain non-alphabetical order that feels more natural + "evenBetterToml.formatter.reorderKeys": false } diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 39a29147..65ac2dc5 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -17,12 +17,12 @@ { "label": "Compile resources", "type": "shell", - "command": "scripts/compile_resources.ps1" + "command": ".venv/Scripts/Activate.ps1; scripts/compile_resources.ps1" }, { "label": "Build AutoSplit", "type": "shell", - "command": "scripts/build.ps1", + "command": ".venv/Scripts/Activate.ps1; scripts/build.ps1", "group": { "kind": "build", "isDefault": true diff --git a/README.md b/README.md index 65a6851e..7947277e 100644 --- a/README.md +++ b/README.md @@ -108,17 +108,13 @@ Select the Video Capture Device that you wanna use if selecting the `Video Captu - This value will be set as the threshold for an image if there is no custom threshold set for that image. 
-#### Pause Time +#### Default Delay Time -- Time in seconds that the program stops comparison after a split. Useful for if you have two of the same split images in a row and want to avoid double-splitting. Also useful for reducing CPU usage. +- Time in milliseconds that the program waits before hitting the split hotkey for that specific split if there is no custom Delay Time set for that image. #### Default Pause Time -- This value will be set as the Pause Time for an image if there is no custom Pause Time set for that image. - -#### Delay Time - -- Time in milliseconds that the program waits before hitting the split hotkey for that specific split. +- Time in seconds that the program stops comparison after a split if there is no custom Pause Time set for that image. Useful for if you have two of the same split images in a row and want to avoid double-splitting. Also useful for reducing CPU usage. #### Dummy splits when undoing / skipping @@ -134,28 +130,38 @@ In this situation you would have only 3 splits in LiveSplit/wsplit (even though - If you are in the 3rd, 4th or 5th image and press the skip key, it will end up on the 6th image - If you are in the 6th image and press the undo key, it will end up on the 5th image -#### Loop Split Images +#### Loop last Split Image to first Split Image If this option is enabled, when the last split meets the threshold and splits, AutoSplit will loop back to the first split image and continue comparisons. If this option is disabled, when the last split meets the threshold and splits, AutoSplit will stop running comparisons. This option does not loop single, specific images. See the Custom Split Image Settings section above for this feature. +#### Start also Resets + +If this option is enabled, a "Start" command (ie: from the Start Image) will also send the "Reset" command. This is useful if you want to automatically restart your timer using the Start Image. 
Since AutoSplit won't be running and won't be checking for the Reset Image. + +Having the reset image check be active at all time would be a better, more organic solution in the future. But that is dependent on migrating to an observer pattern () and being able to reload all images. + +#### Enable auto Reset Image + +This option is mainly meant to be toggled with the `Toggle auto Reset Image` hotkey. You can enable it to temporarily disable the Reset Image if you make a mistake in your run that would cause the Reset Image to trigger. Like exiting back to the game's menu (aka Save&Quit). + ### Custom Split Image Settings - Each split image can have different thresholds, pause times, delay split times, loop amounts, and can be flagged. - These settings are handled in the image's filename. -- Custom thresholds are place between parenthesis `()` in the filename. This value will override the default threshold. -- Custom pause times are placed between square brackets `[]` in the filename. This value will override the default pause time. -- Custom delay times are placed between hash signs `##` in the filename. Note that these are in milliseconds. For example, a 10 second split delay would be `#10000#`. You cannot skip or undo splits during split delays. -- A different comparison method can be specified with their 0-base index between carets `^^`: +- **Custom thresholds** are place between parenthesis `()` in the filename. This value will override the default threshold. +- **Custom pause times** are placed between square brackets `[]` in the filename. This value will override the default pause time. +- **Custom delay times** are placed between hash signs `##` in the filename. Note that these are in milliseconds. For example, a 10 second split delay would be `#10000#`. You cannot skip or undo splits during split delays. 
+- A different **comparison method** can be specified with their 0-base index between carets `^^`: - `^0^`: L2 Norm - `^1^`: Histogram - `^2^`: Perceptual Hash -- Image loop amounts are placed between at symbols `@@` in the filename. For example, a specific image that you want to split 5 times in a row would be `@5@`. The current loop # is conveniently located beneath the current split image. -- Flags are placed between curly brackets `{}` in the filename. Multiple flags are placed in the same set of curly brackets. Current available flags: - - `{d}` dummy split image. When matched, it moves to the next image without hitting your split hotkey. - - `{b}` split when similarity goes below the threshold rather than above. When a split image filename has this flag, the split image similarity will go above the threshold, do nothing, and then split the next time the similarity goes below the threshold. - - `{p}` pause flag. When a split image filename has this flag, it will hit your pause hotkey rather than your split hokey. +- **Image loop** amounts are placed between at symbols `@@` in the filename. For example, a specific image that you want to split 5 times in a row would be `@5@`. The current loop # is conveniently located beneath the current split image. +- **Flags** are placed between curly brackets `{}` in the filename. Multiple flags are placed in the same set of curly brackets. Current available flags: + - `{d}` **dummy split image**. When matched, it moves to the next image without hitting your split hotkey. + - `{b}` split when **similarity goes below** the threshold rather than above. When a split image filename has this flag, the split image similarity will go above the threshold, do nothing, and then split the next time the similarity goes below the threshold. + - `{p}` **pause flag**. When a split image filename has this flag, it will hit your pause hotkey rather than your split hotkey. 
- Filename examples: - `001_SplitName_(0.9)_[10].png` is a split image with a threshold of 0.9 and a pause time of 10 seconds. - `002_SplitName_(0.9)_[10]_{d}.png` is the second split image with a threshold of 0.9, pause time of 10, and is a dummy split. @@ -172,17 +178,18 @@ The best way to create a masked image is to set your capture region as the entir ![Mask Example](/docs/mask_example_image.png) -### Reset image +### Reset Image -You can have one (and only one) image with the keyword `reset` in its name. AutoSplit will press the reset button when it finds this image. This image will only be used for resets and it will not be tied to any split. You can set a probability and pause time for it. The pause time is the amount of seconds AutoSplit will wait before checking for the reset image once the run starts. For example: `Reset_(0.95)_[10].png`. +You can have one (and only one) image with the keyword `reset` in its name. AutoSplit will press the reset button when it finds this image. This image will only be used for resets and it will not be tied to any split. You can set a threshold and pause time for it. The pause time is the amount of seconds AutoSplit will wait before checking for the Reset Image once the run starts. For example: `Reset_(0.95)_[10].png`. -### Start image +### Start Image -The start image is similar to the reset image. You can only have one start image with the keyword `start_auto_splitter`.You can reload the image using the "`Reload Start Image`" button. The pause time is the amount of seconds AutoSplit will wait before starting comparisons of the first split image. Delay times will be used to delay starting your timer after the threshold is met. +The Start Image is similar to the Reset Image. You can only have one Start Image with the keyword `start_auto_splitter`.You can reload the image using the "`Reload Start Image`" button. The pause time is the amount of seconds AutoSplit will wait before starting comparisons of the first split image. 
Delay times will be used to delay starting your timer after the threshold is met. ### Profiles -- Profiles are saved under `%appdata%\AutoSplit\profiles` and use the extension `.toml`. Profiles can be saved and loaded by using File -> Save Profile As... and File -> Load Profile. + +- Profiles use the extension `.toml`. Profiles can be saved and loaded by using `File -> Save Profile As...` and `File -> Load Profile`. - The profile contains all of your settings, including information about the capture region. - You can save multiple profiles, which is useful if you speedrun multiple games. - If you change your display setup (like using a new monitor, or upgrading to Windows 11), you may need to readjust or reselect your Capture Region. @@ -239,18 +246,18 @@ Not a developer? You can still help through the following methods: - [Upvoting feature requests](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+label%3Aenhancement) you are interested in - Sharing AutoSplit with other speedrunners - Upvoting the following upstream issues in libraries and tools we use: - - - - - - - - - - + - + - + - + - - - - - + - - - - - + - ## Credits diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 1361ffac..41f66bbe 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -34,6 +34,10 @@ Please avoid using magic numbers and prefer constants and enums that have a mean If a constant is shared throughout the app, it should live in `src/utils.py`. Unless it is very-specific to a module. For image shape and channels, please use `utils.ImageShape` and `utils.ColorChannel`. +### Image color format and channels + +To avoid image shape mismatch issues, and to keep code simpler, we standardize the image color format to BGRA. This should always be done early in the pipeline, so whatever functionality takes care of obtaining an image should also ensure its color format. 
You can do so with `cv2.cvtColor` (ie: `cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)` or `cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)`). + ## Testing None 😦 Please help us create test suites, we lack the time, but we really want (need!) them. diff --git a/docs/build instructions.md b/docs/build instructions.md index dc85df98..49bc2004 100644 --- a/docs/build instructions.md +++ b/docs/build instructions.md @@ -11,12 +11,20 @@ - [Python](https://www.python.org/downloads/) 3.9+. - [Node](https://nodejs.org) is optional, but required for complete linting. - Alternatively you can install the [pyright python wrapper](https://pypi.org/project/pyright/) which has a bit of an overhead delay. +- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell) - [VSCode](https://code.visualstudio.com/Download) is not required, but highly recommended. - Everything already configured in the workspace, including Run (F5) and Build (Ctrl+Shift+B) commands, default shell, and recommended extensions. - [PyCharm](https://www.jetbrains.com/pycharm/) is also a good Python IDE, but nothing is configured. If you are a PyCharm user, feel free to open a PR with all necessary workspace configurations! ## Install and Build steps +- Create and activate a virtual environment: + - Windows / PowerShell: + - `python -m venv .venv` + - `& ./.venv/Scripts/Activate.ps1` + - Unix / Bash: + - `python3 -m venv .venv` + - `source .venv/bin/activate` - Run `./scripts/install.ps1` to install all dependencies. - If you're having issues with the PySide generated code, you might want to first run `pip uninstall -y shiboken6 PySide PySide-Essentials` - Run the app directly with `./scripts/start.ps1 [--auto-controlled]`. diff --git a/mypy.ini b/mypy.ini index a49a7797..7f45e93d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,21 +1,26 @@ ; We don't run mypy in the CI. This is just to help anyone who would like to use it manually. ; Namely, the mypy_primer tool. 
[mypy] +show_column_numbers = true +mypy_path = $MYPY_CONFIG_FILE_DIR/typings +implicit_reexport = true + strict = true ; Implicit return types ! +check_untyped_defs = true disallow_untyped_calls = false disallow_untyped_defs = false disallow_incomplete_defs = false +disable_error_code = return + +; exclude mypyc build +exclude = .*(build)/.* + +; Auto-generated code, not much we can do there +[mypy-gen.*] +disable_error_code = attr-defined, arg-type ; Of course my stubs are going to be incomplete. Otherwise they'd be on typeshed! ; Mypy becomes really whack with its errors inside these stubs though -mypy_path = typings,src -; exclude doesn't work with strict=true Why? -exclude = .*(typings|gen)/.* - -[mypy-gen.*,cv2.*,] -; strict=false ; Doesn't work in overrides -follow_imports = skip -implicit_reexport = true -strict_optional = false -disable_error_code = attr-defined, misc, name-defined +[mypy-cv2.*] +disable_error_code = misc, name-defined, override diff --git a/pyproject.toml b/pyproject.toml index f688afa6..df2ec687 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,17 +2,18 @@ [tool.ruff] target-version = "py39" line-length = 120 -select = ["ALL"] +select = ["ALL", "NURSERY"] +extend-fixable = ["NURSERY"] # https://beta.ruff.rs/docs/rules ignore = [ ### # Not needed or wanted ### - "D1", # pydocstyle Missing doctring - "D401", # pydocstyle: non-imperative-mood - "EM", # flake8-errmsg - "FBT", # flake8-boolean-trap - "INP", # flake8-no-pep420 + "D1", # pydocstyle Missing doctring + "D401", # pydocstyle: non-imperative-mood + "EM", # flake8-errmsg + "FBT", # flake8-boolean-trap + "INP", # flake8-no-pep420 "ISC003", # flake8-implicit-str-concat: explicit-string-concatenation # Short messages are still considered "long" messages "TRY003", # tryceratops : raise-vanilla-args @@ -22,6 +23,7 @@ ignore = [ "SIM105", # flake8-simplify: use-contextlib-suppress # Checked by type-checker (pyright) "ANN", # flake-annotations + "PGH003", # blanket-type-ignore "TCH", # 
flake8-type-checking # Already shown by Pylance, checked by pyright, and can be caused by overloads. "ARG002", # Unused method argument @@ -30,21 +32,32 @@ ignore = [ "D212", # pydocstyle: multi-line-summary-first-line # Allow differentiating between broken (FIXME) and to be done/added/completed (TODO) "TD001", # flake8-todos: invalid-todo-tag + + ### + # These should be warnings (https://github.com/charliermarsh/ruff/issues/1256) + ### + "FIX", # flake8-fixme # Not all TODOs are worth an issue, this would be better as a warning "TD003", # flake8-todos: missing-todo-link + # False-positives + "TCH004", # https://github.com/astral-sh/ruff/issues/3821 + ### # Specific to this project ### + "D205", # Not all docstrings have a short description + description # We have some Pascal case module names "N999", # pep8-naming: Invalid module name # Print are used as debug logs - "T20", # flake8-print - "D205", # Not all docstrings have a short description + desrciption # This is a relatively small, low contributors project. Git blame suffice. 
- "TD002", + "TD002", # missing-todo-author + # Python 3.11, introduced "zero cost" exception handling + "PERF203", # try-except-in-loop - ### FIXME (no warnings in Ruff yet: https://github.com/charliermarsh/ruff/issues/1256): + ### FIXME/TODO (no warnings in Ruff yet: https://github.com/charliermarsh/ruff/issues/1256): + "CPY001", "PTH", # Ignore until linux support "EXE", @@ -55,10 +68,11 @@ ignore = [ "F811", # Re-exports false positives "F821", # https://github.com/charliermarsh/ruff/issues/3011 # The following can't be controlled for external libraries: - "N8", # Naming conventions - "A", # Shadowing builtin names + "A", # Shadowing builtin names + "ICN001", # unconventional-import-alias + "N8", # Naming conventions "PLR0913", # Argument count - "PYI042", # CamelCase TypeAlias + "PYI042", # CamelCase TypeAlias ] # https://beta.ruff.rs/docs/settings/#flake8-implicit-str-concat @@ -74,14 +88,14 @@ required-imports = ["from __future__ import annotations"] # https://github.com/charliermarsh/ruff/issues/2419 # https://github.com/charliermarsh/ruff/issues/3115 known-local-folder = [ - "capture_method", - "gen", "AutoControlledThread", "AutoSplit", "AutoSplitImage", + "capture_method", "compare", "error_messages", "error_messages", + "gen", "hotkeys", "menu_bar", "region_selection", @@ -108,9 +122,17 @@ max-branches = 15 [tool.autopep8] max_line_length = 120 aggressive = 3 +exclude = ".venv/*,src/gen/*" ignore = [ "E124", # Closing bracket may not match multi-line method invocation style (enforced by add-trailing-comma) - "E70", # Allow ... on same line as def + "E70", # Allow ... 
on same line as def + # Autofixed by Ruff + # Check for the "Fix" flag https://beta.ruff.rs/docs/rules/#pycodestyle-e-w + "E401", # I001: unsorted-imports + "E71", # Comparisons + "E731", # lambda-assignment + "W29", # Whitespaces + "W605", # invalid-escape-sequence ] # https://github.com/microsoft/pyright/blob/main/docs/configuration.md#sample-pyprojecttoml-file @@ -119,15 +141,18 @@ typeCheckingMode = "strict" # Prefer `pyright: ignore` enableTypeIgnoreComments = false # Extra strict +reportImplicitOverride = "error" reportImplicitStringConcatenation = "error" reportCallInDefaultInitializer = "error" -reportMissingSuperCall = "none" # False positives on base classes +reportMissingSuperCall = "none" # False positives on base classes reportPropertyTypeMismatch = "error" reportUninitializedInstanceVariable = "error" reportUnnecessaryTypeIgnoreComment = "error" # Exclude from scanning when running pyright exclude = [ + ".venv/", # Auto generated, fails some strict pyright checks + "build/", "src/gen/", ] # Ignore must be specified for Pylance to stop displaying errors diff --git a/res/about.ui b/res/about.ui index b9e1ee1a..b0e88fd9 100644 --- a/res/about.ui +++ b/res/about.ui @@ -1,5 +1,6 @@ + Toufool AboutAutoSplitWidget @@ -126,12 +127,12 @@ Thank you! 
true - ok_button - donate_text_label - donate_button_label icon_label + donate_text_label version_label created_by_label + ok_button + donate_button_label diff --git a/res/design.ui b/res/design.ui index f4c22050..4f339507 100644 --- a/res/design.ui +++ b/res/design.ui @@ -1,5 +1,6 @@ + Toufool MainWindow @@ -473,7 +474,7 @@ Select Window - + 696 @@ -894,42 +895,6 @@ > - x_label - select_region_button - start_auto_splitter_button - reset_button - undo_split_button - skip_split_button - check_fps_button - fps_label - current_image_label - live_image - current_split_image - width_label - height_label - fps_value_label - width_spinbox - height_spinbox - capture_region_label - current_image_file_label - take_screenshot_button - x_spinbox - y_spinbox - y_label - align_region_button - select_window_button - browse_button - split_image_folder_label - split_image_folder_input - capture_region_window_label - image_loop_label - similarity_viewer_groupbox - reload_start_image_button - start_image_status_label - start_image_status_value_label - image_loop_value_label - previous_image_button - next_image_button diff --git a/res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl b/res/opencv_python_headless-4.8.0+4f81552-cp37-abi3-win_amd64.whl similarity index 76% rename from res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl rename to res/opencv_python_headless-4.8.0+4f81552-cp37-abi3-win_amd64.whl index 585a6efe..e366e442 100644 Binary files a/res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl and b/res/opencv_python_headless-4.8.0+4f81552-cp37-abi3-win_amd64.whl differ diff --git a/res/settings.ui b/res/settings.ui index 45f8d5d4..1711b9b1 100644 --- a/res/settings.ui +++ b/res/settings.ui @@ -1,25 +1,26 @@ + Toufool SettingsWidget 0 0 - 290 - 664 + 285 + 294 - 290 - 664 + 200 + 200 - 290 - 664 + 310 + 294 @@ -34,160 +35,593 @@ :/resources/icon.ico:/resources/icon.ico - + - 10 - 200 - 270 - 181 + -3 + -3 + 291 + 301 - - Capture Settings 
+ + 0 - - - - 150 - 24 - 51 - 24 - - - - QAbstractSpinBox::CorrectToNearestValue - - - 20 - - - 240 - - - 60 - - - - - - 6 - 27 - 141 - 16 - - - - This value will limit the amount of frames per second that AutoSplit will run comparisons - - - Comparison FPS Limit: - - - - - - 6 - 49 - 141 - 20 - - - - Live Capture Region - - - true - - - false - - - - - - 6 - 97 - 251 - 22 - - - - - - - 6 - 75 - 151 - 16 - - - - Capture method: - - - - - - 6 - 126 - 151 - 16 - - - - Capture device: - - - - - false - - - - 6 - 148 - 251 - 22 - - - - Scanning for existing devices... - - - - - - - 10 - 390 - 270 - 266 - - - - Image Settings - - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - - false - - - false - - - - - 170 - 25 - 91 - 22 - - - - L2 Norm: + + + Hotkeys + + + + + 190 + 189 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 90 + 10 + 91 + 22 + + + + + + + true + + + + + + 10 + 73 + 71 + 16 + + + + Undo Split: + + + + + + 90 + 70 + 91 + 22 + + + + + + + true + + + + + + 190 + 39 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 10 + 133 + 71 + 16 + + + + Pause: + + + + + + 190 + 69 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 90 + 40 + 91 + 22 + + + + + + + true + + + + + + 10 + 103 + 71 + 16 + + + + Skip Split: + + + + + + 190 + 9 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 190 + 99 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 190 + 159 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 90 + 190 + 91 + 22 + + + + + + + true + + + + + + 190 + 129 + 81 + 24 + + + + Qt::NoFocus + + + Set Hotkey + + + + + + 10 + 13 + 71 + 16 + + + + Start / Split: + + + + + + 90 + 130 + 91 + 22 + + + + + + + true + + + + + + 10 + 186 + 71 + 32 + + + + Toggle auto +reset image + + + + + + 10 + 43 + 71 + 16 + + + + Reset: + + + + + + 90 + 160 + 91 + 22 + + + + + + + true + + + + + + 10 + 163 + 71 + 16 + + + + Screenshot: + + + + + + 90 + 100 + 91 + 22 + + + + + + + true + + + + + + Capture Settings + + + + + 10 + 200 + 
261 + 22 + + + + true + + + + + + 10 + 13 + 141 + 16 + + + + This value will limit the amount of frames per second that AutoSplit will run comparisons + + + Comparison FPS Limit: + + + + + + 10 + 40 + 141 + 24 + + + + Live Capture Region + + + true + + + false + + + + + + 10 + 70 + 151 + 16 + + + + Capture method: + + + + + + 150 + 10 + 51 + 24 + + + + QAbstractSpinBox::CorrectToNearestValue + + + 20 + + + 240 + + + 60 + + + + + + 200 + 176 + 71 + 24 + + + + Qt::NoFocus + + + Browse... + + + + + + 10 + 120 + 151 + 16 + + + + Capture device: + + + + + + 10 + 90 + 261 + 22 + + + + + + false + + + + 10 + 140 + 261 + 22 + + + + Scanning for existing devices... + + + + + + 10 + 180 + 151 + 16 + + + + Screenshots folder: + + + + + + 10 + 220 + 181 + 24 + + + + Open screenshots on capture + + + true + + + false + + + + + + Image Settings + + + + + 144 + 220 + 71 + 31 + + + + + Segoe UI + 8 + true + + + + README + + + + 0 + 0 + + + + This is a workaround because custom_image_settings_info_label simply will not open links with a left click no matter what we tried. + + + + + + 180 + 70 + 91 + 24 + + + + After an image is matched, this is the amount of time in millseconds that will be delayed before splitting. + + + QAbstractSpinBox::CorrectToNearestValue + + + 999999999 + + + + + + 10 + 170 + 261 + 24 + + + + Enable auto reset image + + + true + + + + + + 180 + 10 + 91 + 22 + + + + L2 Norm: This method should be fine to use for most cases. It finds the difference between each pixel, squares it, sums it over the entire image and takes the square root. This is very fast but is a problem if your image is high frequency. @@ -204,525 +638,201 @@ Perceptual Hash: An explanation on pHash comparison can be found here http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html It is highly recommended to NOT use pHash if you use masked images, or it'll be very inaccurate. 
- - + + + + L2 Norm + + + + + Histograms + + + + + pHash + + + + + + + 10 + 13 + 161 + 16 + + - L2 Norm + Default Comparison Method: + + + + + + 10 + 130 + 261 + 24 + - - - Histograms + Loop Last Split Image to First Split Image + + + false + + + false + + + + + + 10 + 200 + 261 + 61 + + + + + 8 + true + - - - pHash + <html><head/><body><p>Image settings and flags can be set per image through the image file name. These will override the default values. View the <a href="https://github.com/{GITHUB_REPOSITORY}#readme"><span style="text-decoration: underline; color:#0000ff;">README</span></a> for full details on all available custom image settings.</p></body></html> - - - - - - 6 - 28 - 171 - 16 - - - - Default Comparison Method: - - - - - - 6 - 118 - 171 - 16 - - - - Default Pause Time (sec): - - - - - - 170 - 115 - 91 - 24 - - - - The amount of time in seconds that comparison will be paused before moving to the next image. - - - QAbstractSpinBox::CorrectToNearestValue - - - 2 - - - 999999999.000000000000000 - - - 1.000000000000000 - - - 10.000000000000000 - - - - - - 6 - 58 - 171 - 16 - - - - Default Similarity Threshold: - - - - - - 170 - 55 - 51 - 24 - - - - Threshold that the live similarity will need to go above to consider the image a match. - - - QAbstractSpinBox::CorrectToNearestValue - - - 1.000000000000000 - - - 0.010000000000000 - - - 0.900000000000000 - - - - - - 6 - 143 - 261 - 20 - - - - Loop Last Split Image to First Split Image - - - false - - - false - - - - - - 6 - 193 - 261 - 71 - - - - - 8 - true - - - - <html><head/><body><p>Image settings and flags can be set per image through the image file name. These will override the default values. 
View the <a href="https://github.com/{GITHUB_REPOSITORY}#readme"><span style="text-decoration: underline; color:#0000ff;">README</span></a> for full details on all available custom image settings.</p></body></html> - - - true - - - - - - 6 - 88 - 171 - 16 - - - - Default Delay Time (ms): - - - - - - 170 - 85 - 91 - 24 - - - - After an image is matched, this is the amount of time in millseconds that will be delayed before splitting. - - - QAbstractSpinBox::CorrectToNearestValue - - - 999999999 - - - - - - 140 - 218 - 71 - 31 - - - - - Segoe UI - 8 - true - - - - README - - - - 0 - 0 - - - - This is a workaround because custom_image_settings_info_label simply will not open links with a left click no matter what we tried. - - - - - - 6 - 168 - 171 - 20 - - - - Enable auto reset image - - - true - - - - - - - 10 - 10 - 270 - 191 - - - - Hotkeys - - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - - false - - - false - - - - - 180 - 128 - 81 - 24 - - - - Qt::NoFocus - - - Set Hotkey - - - - - - 80 - 25 - 94 - 22 - - - - - - - true - - - - - - 80 - 77 - 94 - 22 - - - - - - - true - - - - - - 6 - 28 - 71 - 16 - - - - Start / Split: - - - - - - 80 - 51 - 94 - 22 - - - - - - - true - - - - - - 180 - 76 - 81 - 24 - - - - Qt::NoFocus - - - Set Hotkey - - - - - - 6 - 54 - 41 - 16 - - - - Reset: - - - - - - 180 - 50 - 81 - 24 - - - - Qt::NoFocus - - - Set Hotkey - - - - - - 180 - 24 - 81 - 24 - - - - Qt::NoFocus - - - Set Hotkey - - - - - - 6 - 132 - 41 - 16 - - - - Pause: - - - - - - 80 - 129 - 94 - 22 - - - - - - - true - - - - - - 6 - 80 - 71 - 16 - - - - Undo Split: - - - - - - 180 - 102 - 81 - 24 - - - - Qt::NoFocus - - - Set Hotkey - - - - - - 6 - 106 - 61 - 16 - - - - Skip Split: - - - - - - 80 - 103 - 94 - 22 - - - - - - - true - - - - - - 180 - 154 - 81 - 24 - - - - Qt::NoFocus - - - Set Hotkey - - - - - - 6 - 151 - 71 - 32 - - - - Toggle auto -reset image - - - - - - 80 - 155 - 94 - 22 - - - - - - - true - + + true + + + + + + 10 + 40 + 171 + 24 + + + + Default 
Similarity Threshold: + + + + + + 180 + 40 + 51 + 24 + + + + Threshold that the live similarity will need to go above to consider the image a match. + + + QAbstractSpinBox::CorrectToNearestValue + + + 1.000000000000000 + + + 0.010000000000000 + + + 0.900000000000000 + + + + + + 180 + 100 + 91 + 24 + + + + The amount of time in seconds that comparison will be paused before moving to the next image. + + + QAbstractSpinBox::CorrectToNearestValue + + + 2 + + + 999999999.000000000000000 + + + 1.000000000000000 + + + 10.000000000000000 + + + + + + 10 + 103 + 171 + 16 + + + + Default Pause Time (sec): + + + + + + 10 + 73 + 171 + 16 + + + + Default Delay Time (ms): + + + + + + 10 + 150 + 261 + 24 + + + + Start also Resets + + + false + + + false + + + custom_image_settings_info_label + default_delay_time_spinbox + enable_auto_reset_image_checkbox + default_comparison_method_combobox + default_comparison_method_combobox_label + loop_splits_checkbox + default_similarity_threshold_label + default_similarity_threshold_spinbox + default_pause_time_spinbox + default_pause_time_label + default_delay_time_label + readme_link_button + start_also_resets_checkbox @@ -732,11 +842,14 @@ reset image set_undo_split_hotkey_button set_skip_split_hotkey_button set_pause_hotkey_button + set_screenshot_hotkey_button + set_toggle_auto_reset_image_hotkey_button fps_limit_spinbox live_capture_region_checkbox capture_method_combobox capture_device_combobox - default_comparison_method + screenshot_directory_browse_button + default_comparison_method_combobox default_similarity_threshold_spinbox default_delay_time_spinbox default_pause_time_spinbox diff --git a/res/update_checker.ui b/res/update_checker.ui index 6ff6ea85..ee811a23 100644 --- a/res/update_checker.ui +++ b/res/update_checker.ui @@ -1,5 +1,6 @@ + Toufool UpdateChecker diff --git a/scripts/build.ps1 b/scripts/build.ps1 index f9825aaf..b0ed2844 100644 --- a/scripts/build.ps1 +++ b/scripts/build.ps1 @@ -5,6 +5,12 @@ $arguments = @( 
'--onefile', '--windowed', '--additional-hooks-dir=Pyinstaller/hooks', + # Optional packages installed by PyAutoGUI + '--exclude=pygetwindow', + '--exclude=pymsgbox', + '--exclude=pytweening', + # Used by imagehash.whash + '--exclude=pywt', '--icon=res/icon.ico', '--splash=res/splash.png') diff --git a/scripts/compile_resources.ps1 b/scripts/compile_resources.ps1 index 91a180a3..1a49dd22 100644 --- a/scripts/compile_resources.ps1 +++ b/scripts/compile_resources.ps1 @@ -1,7 +1,8 @@ $originalDirectory = $pwd Set-Location "$PSScriptRoot/.." -New-Item -Force -ItemType directory ./src/gen | Out-Null +New-Item ./src/gen -ItemType directory -Force | Out-Null +New-Item ./src/gen/__init__.py -ItemType File -Force | Out-Null pyside6-uic './res/about.ui' -o './src/gen/about.py' pyside6-uic './res/design.ui' -o './src/gen/design.py' pyside6-uic './res/settings.ui' -o './src/gen/settings.py' diff --git a/scripts/install.ps1 b/scripts/install.ps1 index 1a513d35..284633e1 100644 --- a/scripts/install.ps1 +++ b/scripts/install.ps1 @@ -4,6 +4,17 @@ $dev = If ($Env:GITHUB_JOB -eq 'Build') { '' } Else { '-dev' } python -m pip install wheel pip setuptools --upgrade pip install -r "$PSScriptRoot/requirements$dev.txt" --upgrade +# Patch libraries so we don't have to install from git + +# Prevent pyautogui from setting Process DPI Awareness, which Qt tries to do then throws warnings about it. +# The unittest workaround significantly increases build time, boot time and build size with PyInstaller. 
+# https://github.com/asweigart/pyautogui/issues/663#issuecomment-1296719464 +$pyautoguiPath = python -c 'import pyautogui as _; print(_.__path__[0])' +(Get-Content "$pyautoguiPath/_pyautogui_win.py").replace('ctypes.windll.user32.SetProcessDPIAware()', 'pass') | + Set-Content "$pyautoguiPath/_pyautogui_win.py" +python -m pip uninstall pyscreeze mouseinfo pyperclip -y + + # Don't compile resources on the Build CI job as it'll do so in build script If ($dev) { & "$PSScriptRoot/compile_resources.ps1" diff --git a/scripts/lint.ps1 b/scripts/lint.ps1 index b59e502e..84dc6f8b 100644 --- a/scripts/lint.ps1 +++ b/scripts/lint.ps1 @@ -3,8 +3,8 @@ Set-Location "$PSScriptRoot/.." $exitCodes = 0 Write-Host "`nRunning formatting..." -autopep8 $(git ls-files '**.py*') --in-place -add-trailing-comma $(git ls-files '**.py*') --py36-plus +autopep8 src/ --recursive --in-place +add-trailing-comma $(git ls-files '**.py*') Write-Host "`nRunning Ruff..." ruff check . --fix diff --git a/scripts/requirements-dev.txt b/scripts/requirements-dev.txt index 9898efaa..e59eb6a9 100644 --- a/scripts/requirements-dev.txt +++ b/scripts/requirements-dev.txt @@ -7,9 +7,9 @@ -r requirements.txt # # Linters & Formatters -add-trailing-comma>=2.3.0 # Added support for with statement -autopep8>=2.0.0 # New checks -ruff>=0.0.269 # New TODO and PYI violations +add-trailing-comma>=3.0.0 # Added support for with statement # Must match .pre-commit-config.yaml +autopep8>=2.0.2 # New checks # Must match .pre-commit-config.yaml +ruff>=0.0.276 # Fix ignored "ignore" configuration + cache fixes # Must match .pre-commit-config.yaml # # Run `./scripts/designer.ps1` to quickly open the bundled Qt Designer. 
# Can also be downloaded externally as a non-python package diff --git a/scripts/requirements.txt b/scripts/requirements.txt index 1ad1ac5e..21d9a42c 100644 --- a/scripts/requirements.txt +++ b/scripts/requirements.txt @@ -7,17 +7,18 @@ certifi ImageHash>=4.3.1 # Contains type information + setup as package not module git+https://github.com/boppreh/keyboard.git#egg=keyboard # Fix install on macos and linux-ci https://github.com/boppreh/keyboard/pull/568 numpy>=1.23.2 # Python 3.11 wheels -# opencv-python-headless>=4.6 # Breaking changes importing cv2.cv2 -./res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl # New typing + OBS Camera fixes +# From https://github.com/opencv/opencv-python/actions/runs/5461702800 +./res/opencv_python_headless-4.8.0+4f81552-cp37-abi3-win_amd64.whl +# opencv-python-headless>=4.8.0.74 # New typing + OBS Camera fixes packaging Pillow>=9.2 # gnome-screeshot checks psutil PyAutoGUI -PyWinCtl>=0.0.42 # py.typed -PySide6-Essentials>=6.5.1 # fixes incomplete tuple return types https://bugreports.qt.io/browse/PYSIDE-2285 +PyWinCtl>=0.0.42 # py.typed +PySide6-Essentials>=6.5.1 # fixes incomplete tuple return types https://bugreports.qt.io/browse/PYSIDE-2285 requests<=2.28.1 # 2.28.2 has issues with PyInstaller https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/534 toml -typing-extensions>=4.4.0 # @override decorator support +typing-extensions>=4.4.0 # @override decorator support # # Build and compile resources pyinstaller>=5.5 # Python 3.11 support diff --git a/src/AutoSplit.py b/src/AutoSplit.py index aa6706df..21c9f6b3 100644 --- a/src/AutoSplit.py +++ b/src/AutoSplit.py @@ -12,6 +12,7 @@ import certifi import cv2 +from cv2.typing import MatLike from psutil import process_iter from PySide6 import QtCore, QtGui from PySide6.QtTest import QTest @@ -38,7 +39,16 @@ from region_selection import align_region, select_region, select_window, validate_before_parsing from split_parser import BELOW_FLAG, DUMMY_FLAG, PAUSE_FLAG, 
parse_and_validate_images from user_profile import DEFAULT_PROFILE -from utils import AUTOSPLIT_VERSION, FROZEN, auto_split_directory, decimal, is_valid_image, open_file +from utils import ( + AUTOSPLIT_VERSION, + BGRA_CHANNEL_COUNT, + FROZEN, + auto_split_directory, + decimal, + flatten, + is_valid_image, + open_file, +) CHECK_FPS_ITERATIONS = 10 DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = 2 @@ -47,8 +57,6 @@ os.environ["REQUESTS_CA_BUNDLE"] = certifi.where() myappid = f"Toufool.AutoSplit.v{AUTOSPLIT_VERSION}" ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) -# qt.qpa.window: SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2) failed: COM error 0x5: Access is denied. # noqa: E501 -# ctypes.windll.user32.SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2) class AutoSplit(QMainWindow, design.Ui_MainWindow): @@ -61,6 +69,7 @@ class AutoSplit(QMainWindow, design.Ui_MainWindow): skip_split_signal = QtCore.Signal() undo_split_signal = QtCore.Signal() pause_signal = QtCore.Signal() + screenshot_signal = QtCore.Signal() after_setting_hotkey_signal = QtCore.Signal() update_checker_widget_signal = QtCore.Signal(str, bool) load_start_image_signal = QtCore.Signal(bool, bool) @@ -77,39 +86,39 @@ class AutoSplit(QMainWindow, design.Ui_MainWindow): CheckForUpdatesThread: QtCore.QThread | None = None SettingsWidget: settings.Ui_SettingsWidget | None = None - # Initialize a few attributes - hwnd = 0 - """Window Handle used for Capture Region""" - last_saved_settings = DEFAULT_PROFILE - similarity = 0.0 - split_image_number = 0 - split_images_and_loop_number: list[tuple[AutoSplitImage, int]] = [] - split_groups: list[list[int]] = [] - capture_method = CaptureMethodBase(None) - is_running = False - - # Last loaded settings empty and last successful loaded settings file path to None until we try to load them - last_loaded_settings = DEFAULT_PROFILE - last_successfully_loaded_settings_file_path: str | None = None - 
"""For when a file has never loaded, but you successfully "Save File As".""" - - # Automatic timer start - highest_similarity = 0.0 - reset_highest_similarity = 0.0 - - # Ensure all other attributes are defined - waiting_for_split_delay = False - split_below_threshold = False - run_start_time = 0.0 - start_image: AutoSplitImage | None = None - reset_image: AutoSplitImage | None = None - split_images: list[AutoSplitImage] = [] - split_image: AutoSplitImage | None = None - update_auto_control: AutoControlledThread | None = None - def __init__(self): # noqa: PLR0915 super().__init__() + # Initialize a few attributes + self.hwnd = 0 + """Window Handle used for Capture Region""" + self.last_saved_settings = DEFAULT_PROFILE + self.similarity = 0.0 + self.split_image_number = 0 + self.split_images_and_loop_number: list[tuple[AutoSplitImage, int]] = [] + self.split_groups: list[list[int]] = [] + self.capture_method = CaptureMethodBase(None) + self.is_running = False + + # Last loaded settings empty and last successful loaded settings file path to None until we try to load them + self.last_loaded_settings = DEFAULT_PROFILE + self.last_successfully_loaded_settings_file_path: str | None = None + """For when a file has never loaded, but you successfully "Save File As".""" + + # Automatic timer start + self.highest_similarity = 0.0 + self.reset_highest_similarity = 0.0 + + # Ensure all other attributes are defined + self.waiting_for_split_delay = False + self.split_below_threshold = False + self.run_start_time = 0.0 + self.start_image: AutoSplitImage | None = None + self.reset_image: AutoSplitImage | None = None + self.split_images: list[AutoSplitImage] = [] + self.split_image: AutoSplitImage | None = None + self.update_auto_control: AutoControlledThread | None = None + # Setup global error handling def _show_error_signal_slot(error_message_box: Callable[..., object]): return error_message_box() @@ -117,7 +126,10 @@ def _show_error_signal_slot(error_message_box: Callable[..., 
object]): sys.excepthook = error_messages.make_excepthook(self) self.setupUi(self) - self.setWindowTitle(f"AutoSplit v{AUTOSPLIT_VERSION}") + self.setWindowTitle( + f"AutoSplit v{AUTOSPLIT_VERSION}" + + (" (externally controlled)" if self.is_auto_controlled else ""), + ) # Hotkeys need to be initialized to be passed as thread arguments in hotkeys.py for hotkey in HOTKEYS: @@ -153,7 +165,7 @@ def _show_error_signal_slot(error_message_box: Callable[..., object]): self.action_load_profile.triggered.connect(lambda: user_profile.load_settings(self)) # Connecting button clicks to functions - self.browse_button.clicked.connect(self.__browse) + self.split_image_folder_button.clicked.connect(self.__browse) self.select_region_button.clicked.connect(lambda: select_region(self)) self.take_screenshot_button.clicked.connect(self.__take_screenshot) self.start_auto_splitter_button.clicked.connect(self.__auto_splitter) @@ -189,6 +201,7 @@ def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool) self.skip_split_signal.connect(self.skip_split) self.undo_split_signal.connect(self.undo_split) self.pause_signal.connect(self.pause) + self.screenshot_signal.connect(self.__take_screenshot) # live image checkbox self.timer_live_image.timeout.connect(lambda: self.__update_live_image_details(None, True)) @@ -229,7 +242,7 @@ def __browse(self): self.split_image_folder_input.setText(f"{new_split_image_directory}/") self.load_start_image_signal.emit(False, True) - def __update_live_image_details(self, capture: cv2.Mat | None, called_from_timer: bool = False): + def __update_live_image_details(self, capture: MatLike | None, called_from_timer: bool = False): # HACK: Since this is also called in __get_capture_for_comparison, # we don't need to update anything if the app is running if called_from_timer: @@ -248,30 +261,20 @@ def __update_live_image_details(self, capture: cv2.Mat | None, called_from_timer self.live_image.clear() # Set live image in UI else: - 
set_preview_image(self.live_image, capture, False) + set_preview_image(self.live_image, capture) def __load_start_image(self, started_by_button: bool = False, wait_for_delay: bool = True): """Not thread safe (if triggered by LiveSplit for example). Use `load_start_image_signal.emit` instead.""" self.timer_start_image.stop() self.current_image_file_label.setText("-") self.start_image_status_value_label.setText("not found") - set_preview_image(self.current_split_image, None, True) + set_preview_image(self.current_split_image, None) if not (validate_before_parsing(self, started_by_button) and parse_and_validate_images(self)): QApplication.processEvents() return - if self.start_image: - if not self.is_auto_controlled \ - and ( - not self.settings_dict["split_hotkey"] - or not self.settings_dict["reset_hotkey"] - or not self.settings_dict["pause_hotkey"] - ): - error_messages.load_start_image() - QApplication.processEvents() - return - else: + if not self.start_image: if started_by_button: error_messages.no_keyword_image(START_KEYWORD) QApplication.processEvents() @@ -332,12 +335,12 @@ def __start_image_function(self): self.split_below_threshold = False if not self.start_image.check_flag(DUMMY_FLAG): - # Delay start image if needed + # Delay Start Image if needed if self.start_image.get_delay_time(self) > 0: self.start_image_status_value_label.setText("delaying start...") delay_start_time = time() start_delay = self.start_image.get_delay_time(self) / 1000 - time_delta = 0 + time_delta = 0.0 while time_delta < start_delay: delay_time_left = start_delay - time_delta self.current_split_image.setText( @@ -374,7 +377,7 @@ def __take_screenshot(self): screenshot_index = 1 while True: screenshot_path = os.path.join( - self.settings_dict["split_image_directory"], + self.settings_dict["screenshot_directory"] or self.settings_dict["split_image_directory"], f"{screenshot_index:03}_SplitImage.png", ) if not os.path.exists(screenshot_path): @@ -389,7 +392,8 @@ def 
__take_screenshot(self): # Save and open image cv2.imwrite(screenshot_path, capture) - open_file(screenshot_path) + if self.settings_dict["open_screenshot"]: + open_file(screenshot_path) def __check_fps(self): self.fps_value_label.setText("...") @@ -428,10 +432,12 @@ def undo_split(self, navigate_image_only: bool = False): """Undo Split" and "Prev. Img." buttons connect to here.""" # Can't undo until timer is started # or Undoing past the first image - if not self.is_running \ - or "Delayed Split" in self.current_split_image.text() \ - or (not self.undo_split_button.isEnabled() and not self.is_auto_controlled) \ - or self.__is_current_split_out_of_range(): + if ( + not self.is_running + or "Delayed Split" in self.current_split_image.text() + or (not self.undo_split_button.isEnabled() and not self.is_auto_controlled) + or self.__is_current_split_out_of_range() + ): return if not navigate_image_only: @@ -509,27 +515,31 @@ def __auto_splitter(self): # noqa: PLR0912,PLR0915 self.run_start_time = time() if not (validate_before_parsing(self) and parse_and_validate_images(self)): - self.gui_changes_on_reset(True) + # `safe_to_reload_start_image: bool = False` becasue __load_start_image also does this check, + # we don't want to double a Start/Reset Image error message + self.gui_changes_on_reset(False) return # Construct a list of images + loop count tuples. 
- self.split_images_and_loop_number = [ - item for flattenlist - in [ + self.split_images_and_loop_number = list( + flatten( [(split_image, i + 1) for i in range(split_image.loops)] for split_image in self.split_images - ] - for item in flattenlist - ] + ), + ) # Construct groups of splits self.split_groups = [] + dummy_splits_array = [] + number_of_split_images = len(self.split_images_and_loop_number) current_group: list[int] = [] self.split_groups.append(current_group) for i, image in enumerate(self.split_images_and_loop_number): current_group.append(i) - if not image[0].check_flag(DUMMY_FLAG) and i < len(self.split_images_and_loop_number) - 1: + dummy = image[0].check_flag(DUMMY_FLAG) + dummy_splits_array.append(dummy) + if not dummy and i < number_of_split_images - 1: current_group = [] self.split_groups.append(current_group) @@ -545,8 +555,6 @@ def __auto_splitter(self): # noqa: PLR0912,PLR0915 self.waiting_for_split_delay = False self.split_below_threshold = False split_time = 0 - number_of_split_images = len(self.split_images_and_loop_number) - dummy_splits_array = [image_loop[0].check_flag(DUMMY_FLAG) for image_loop in self.split_images_and_loop_number] # First loop: stays in this loop until all of the split images have been split while self.split_image_number < number_of_split_images: @@ -663,7 +671,7 @@ def __similarity_threshold_loop(self, number_of_split_images: int, dummy_splits_ # if the b flag is set, let similarity go above threshold first, # then split on similarity below threshold. # if no b flag, just split when similarity goes above threshold. - # TODO: Abstract with similar check in start image + # TODO: Abstract with similar check in Start Image if not self.waiting_for_split_delay: if similarity >= self.split_image.get_similarity_threshold(self): if not below_flag: @@ -695,7 +703,7 @@ def __pause_loop(self, stop_time: float, message: str): # This is done so that it can detect if user hit split/undo split while paused/delayed. 
pause_split_image_number = self.split_image_number while True: - # Calculate similarity for reset image + # Calculate similarity for Reset Image if self.__reset_if_should(self.__get_capture_for_comparison()[0]): return True @@ -718,7 +726,7 @@ def __pause_loop(self, stop_time: float, message: str): def gui_changes_on_start(self): self.timer_start_image.stop() self.start_auto_splitter_button.setText("Running...") - self.browse_button.setEnabled(False) + self.split_image_folder_button.setEnabled(False) self.reload_start_image_button.setEnabled(False) self.previous_image_button.setEnabled(True) self.next_image_button.setEnabled(True) @@ -748,7 +756,7 @@ def gui_changes_on_reset(self, safe_to_reload_start_image: bool = False): self.table_reset_image_live_label.setText("-") self.table_reset_image_highest_label.setText("-") self.table_reset_image_threshold_label.setText("-") - self.browse_button.setEnabled(True) + self.split_image_folder_button.setEnabled(True) self.reload_start_image_button.setEnabled(True) self.previous_image_button.setEnabled(False) self.next_image_button.setEnabled(False) @@ -788,7 +796,7 @@ def __get_capture_for_comparison(self): self.__update_live_image_details(capture) return capture, is_old_image - def __reset_if_should(self, capture: cv2.Mat | None): + def __reset_if_should(self, capture: MatLike | None): """Checks if we should reset, resets if it's the case, and returns the result.""" if self.reset_image: if self.settings_dict["enable_auto_reset"]: @@ -837,7 +845,7 @@ def __update_split_image(self, specific_image: AutoSplitImage | None = None): # Get split image self.split_image = specific_image or self.split_images_and_loop_number[0 + self.split_image_number][0] if is_valid_image(self.split_image.byte_array): - set_preview_image(self.current_split_image, self.split_image.byte_array, True) + set_preview_image(self.current_split_image, self.split_image.byte_array) self.current_image_file_label.setText(self.split_image.filename) 
self.table_current_image_threshold_label.setText(decimal(self.split_image.get_similarity_threshold(self))) @@ -855,7 +863,8 @@ def closeEvent(self, event: QtGui.QCloseEvent | None = None): def exit_program() -> NoReturn: if self.update_auto_control: - self.update_auto_control.terminate() + # self.update_auto_control.terminate() hangs in PySide6 + self.update_auto_control.quit() self.capture_method.close(self) if event is not None: event.accept() @@ -897,21 +906,21 @@ def exit_program() -> NoReturn: exit_program() -def set_preview_image(qlabel: QLabel, image: cv2.Mat | None, transparency: bool): +def set_preview_image(qlabel: QLabel, image: MatLike | None): if not is_valid_image(image): # Clear current pixmap if no image. But don't clear text if not qlabel.text(): qlabel.clear() else: - if transparency: - color_code = cv2.COLOR_BGRA2RGBA + height, width, channels = image.shape + + if channels == BGRA_CHANNEL_COUNT: image_format = QtGui.QImage.Format.Format_RGBA8888 + capture = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) else: - color_code = cv2.COLOR_BGRA2BGR image_format = QtGui.QImage.Format.Format_BGR888 + capture = image - capture = cv2.cvtColor(image, color_code) - height, width, channels = capture.shape qimage = QtGui.QImage(capture.data, width, height, width * channels, image_format) qlabel.setPixmap( QtGui.QPixmap(qimage).scaled( diff --git a/src/AutoSplitImage.py b/src/AutoSplitImage.py index fd2ae8d9..167c0b30 100644 --- a/src/AutoSplitImage.py +++ b/src/AutoSplitImage.py @@ -7,12 +7,14 @@ import cv2 import numpy as np +from cv2.typing import MatLike import error_messages from compare import COMPARE_METHODS_BY_INDEX, check_if_image_has_transparency -from utils import MAXBYTE, RGB_CHANNEL_COUNT, ColorChannel, ImageShape, is_valid_image +from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image if TYPE_CHECKING: + from AutoSplit import AutoSplit # Resize to these width and height so that FPS performance increases @@ -32,14 +34,14 @@ 
class ImageType(IntEnum): START = 2 -class AutoSplitImage(): +class AutoSplitImage: path: str filename: str flags: int loops: int image_type: ImageType - byte_array: cv2.Mat | None = None - mask: cv2.Mat | None = None + byte_array: MatLike | None = None + mask: MatLike | None = None # This value is internal, check for mask instead _has_transparency = False # These values should be overriden by some Defaults if None. Use getters instead @@ -123,7 +125,7 @@ def __read_image_bytes(self, path: str): else: image = cv2.resize(image, COMPARISON_RESIZE, interpolation=cv2.INTER_NEAREST) # Add Alpha channel if missing - if image.shape[ImageShape.Channels] == RGB_CHANNEL_COUNT: + if image.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA) self.byte_array = image @@ -134,15 +136,19 @@ def check_flag(self, flag: int): def compare_with_capture( self, default: AutoSplit | int, - capture: cv2.Mat | None, + capture: MatLike | None, ): """Compare image with capture using image's comparison method. 
Falls back to combobox.""" if not is_valid_image(self.byte_array) or not is_valid_image(capture): return 0.0 - capture = cv2.resize(capture, self.byte_array.shape[1::-1]) + resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1]) comparison_method = self.__get_comparison_method(default) - return COMPARE_METHODS_BY_INDEX.get(comparison_method, compare_dummy)(self.byte_array, capture, self.mask) + return COMPARE_METHODS_BY_INDEX.get( + comparison_method, compare_dummy, + )( + self.byte_array, resized_capture, self.mask, + ) def compare_dummy(*_: object): return 0.0 diff --git a/src/capture_method/BitBltCaptureMethod.py b/src/capture_method/BitBltCaptureMethod.py index c65dda80..887a1f53 100644 --- a/src/capture_method/BitBltCaptureMethod.py +++ b/src/capture_method/BitBltCaptureMethod.py @@ -4,17 +4,19 @@ import ctypes.wintypes from typing import TYPE_CHECKING, cast -import cv2 import numpy as np import pywintypes import win32con import win32ui +from cv2.typing import MatLike +from typing_extensions import override from win32 import win32gui from capture_method.CaptureMethodBase import CaptureMethodBase -from utils import RGBA_CHANNEL_COUNT, get_window_bounds, is_valid_hwnd, try_delete_dc +from utils import BGRA_CHANNEL_COUNT, get_window_bounds, is_valid_hwnd, try_delete_dc if TYPE_CHECKING: + from AutoSplit import AutoSplit # This is an undocumented nFlag value for PrintWindow @@ -32,10 +34,11 @@ class BitBltCaptureMethod(CaptureMethodBase): _render_full_content = False - def get_frame(self, autosplit: AutoSplit) -> tuple[cv2.Mat | None, bool]: + @override + def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: selection = autosplit.settings_dict["capture_region"] hwnd = autosplit.hwnd - image: cv2.Mat | None = None + image: MatLike | None = None if not self.check_selected_region_exists(autosplit): return None, False @@ -64,7 +67,7 @@ def get_frame(self, autosplit: AutoSplit) -> tuple[cv2.Mat | None, bool]: win32con.SRCCOPY, ) image = 
np.frombuffer(cast(bytes, bitmap.GetBitmapBits(True)), dtype=np.uint8) - image.shape = (selection["height"], selection["width"], RGBA_CHANNEL_COUNT) + image.shape = (selection["height"], selection["width"], BGRA_CHANNEL_COUNT) except (win32ui.error, pywintypes.error): # Invalid handle or the window was closed while it was being manipulated return None, False @@ -76,6 +79,7 @@ def get_frame(self, autosplit: AutoSplit) -> tuple[cv2.Mat | None, bool]: win32gui.DeleteObject(bitmap.GetHandle()) return image, False + @override def recover_window(self, captured_window_title: str, autosplit: AutoSplit): hwnd = win32gui.FindWindow(None, captured_window_title) if not is_valid_hwnd(hwnd): diff --git a/src/capture_method/CaptureMethodBase.py b/src/capture_method/CaptureMethodBase.py index bc078b50..b7dcc09c 100644 --- a/src/capture_method/CaptureMethodBase.py +++ b/src/capture_method/CaptureMethodBase.py @@ -2,15 +2,16 @@ from typing import TYPE_CHECKING -import cv2 +from cv2.typing import MatLike from utils import is_valid_hwnd if TYPE_CHECKING: + from AutoSplit import AutoSplit -class CaptureMethodBase(): +class CaptureMethodBase: name = "None" short_description = "" description = "" @@ -21,13 +22,13 @@ def __init__(self, autosplit: AutoSplit | None): def reinitialize(self, autosplit: AutoSplit): self.close(autosplit) - self.__init__(autosplit) + self.__init__(autosplit) # type: ignore[misc] def close(self, autosplit: AutoSplit): # Some capture methods don't need an initialization process pass - def get_frame(self, autosplit: AutoSplit) -> tuple[cv2.Mat | None, bool]: + def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: """ Captures an image of the region for a window matching the given parameters of the bounding box. 
diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py index da81bc46..cf5839e0 100644 --- a/src/capture_method/DesktopDuplicationCaptureMethod.py +++ b/src/capture_method/DesktopDuplicationCaptureMethod.py @@ -1,11 +1,13 @@ from __future__ import annotations import ctypes -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, Union, cast import cv2 import d3dshot +import numpy as np import win32con +from typing_extensions import override from win32 import win32gui from capture_method.BitBltCaptureMethod import BitBltCaptureMethod @@ -33,6 +35,7 @@ def __init__(self, autosplit: AutoSplit | None): # Must not set statically as some laptops will throw an error self.desktop_duplication = d3dshot.create(capture_output="numpy") + @override def get_frame(self, autosplit: AutoSplit): selection = autosplit.settings_dict["capture_region"] hwnd = autosplit.hwnd @@ -53,7 +56,10 @@ def get_frame(self, autosplit: AutoSplit): top = selection["y"] + offset_y + top_bounds right = selection["width"] + left bottom = selection["height"] + top - screenshot = self.desktop_duplication.screenshot((left, top, right, bottom)) + screenshot = cast( + Union[np.ndarray[int, np.dtype[np.generic]], None], + self.desktop_duplication.screenshot((left, top, right, bottom)), + ) if screenshot is None: return None, False - return cv2.cvtColor(cast(cv2.Mat, screenshot), cv2.COLOR_RGBA2BGRA), False + return cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGRA), False diff --git a/src/capture_method/VideoCaptureDeviceCaptureMethod.py b/src/capture_method/VideoCaptureDeviceCaptureMethod.py index e557ef48..9733acea 100644 --- a/src/capture_method/VideoCaptureDeviceCaptureMethod.py +++ b/src/capture_method/VideoCaptureDeviceCaptureMethod.py @@ -4,24 +4,33 @@ from typing import TYPE_CHECKING import cv2 +import cv2.Error import numpy as np -from pygrabber import dshow_graph +from cv2.typing import MatLike +from 
pygrabber.dshow_graph import FilterGraph +from typing_extensions import override from capture_method.CaptureMethodBase import CaptureMethodBase from error_messages import CREATE_NEW_ISSUE_MESSAGE, exception_traceback -from utils import is_valid_image +from utils import ImageShape, is_valid_image if TYPE_CHECKING: + from AutoSplit import AutoSplit OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128] -def is_blank(image: cv2.Mat): +def is_blank(image: MatLike): # Running np.all on the entire array or looping manually through the # entire array is extremely slow when we can't stop early. # Instead we check for a few key pixels, in this case, corners - return np.all(image[::image.shape[0] - 1, ::image.shape[1] - 1] == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL) + return np.all( + image[ + ::image.shape[ImageShape.Y] - 1, + ::image.shape[ImageShape.X] - 1, + ] == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL, + ) class VideoCaptureDeviceCaptureMethod(CaptureMethodBase): @@ -35,7 +44,7 @@ class VideoCaptureDeviceCaptureMethod(CaptureMethodBase): capture_device: cv2.VideoCapture capture_thread: Thread | None stop_thread: Event - last_captured_frame: cv2.Mat | None = None + last_captured_frame: MatLike | None = None is_old_image = False def __read_loop(self, autosplit: AutoSplit): @@ -43,15 +52,15 @@ def __read_loop(self, autosplit: AutoSplit): while not self.stop_thread.is_set(): try: result, image = self.capture_device.read() - except cv2.error as error: + except cv2.error as cv2_error: if not ( - error.code == cv2.Error.STS_ERROR + cv2_error.code == cv2.Error.STS_ERROR and ( # Likely means the camera is occupied - error.msg.endswith("in function 'cv::VideoCapture::grab'\n") + cv2_error.msg.endswith("in function 'cv::VideoCapture::grab'\n") # Some capture cards we cannot use directly # https://github.com/opencv/opencv/issues/23539 - or error.msg.endswith("in function 'cv::VideoCapture::retrieve'\n") + or cv2_error.msg.endswith("in function 'cv::VideoCapture::retrieve'\n") ) ): raise @@ -80,7 
+89,7 @@ def __read_loop(self, autosplit: AutoSplit): def __init__(self, autosplit: AutoSplit): super().__init__(autosplit) - filter_graph = dshow_graph.FilterGraph() + filter_graph = FilterGraph() filter_graph.add_video_input_device(autosplit.settings_dict["capture_device_id"]) width, height = filter_graph.get_input_device().get_current_format() filter_graph.remove_filters() @@ -98,6 +107,7 @@ def __init__(self, autosplit: AutoSplit): self.capture_thread = Thread(target=lambda: self.__read_loop(autosplit)) self.capture_thread.start() + @override def close(self, autosplit: AutoSplit): self.stop_thread.set() if self.capture_thread: @@ -105,6 +115,7 @@ def close(self, autosplit: AutoSplit): self.capture_thread = None self.capture_device.release() + @override def get_frame(self, autosplit: AutoSplit): if not self.check_selected_region_exists(autosplit): return None, False @@ -117,13 +128,14 @@ def get_frame(self, autosplit: AutoSplit): selection = autosplit.settings_dict["capture_region"] # Ensure we can't go OOB of the image - y = min(selection["y"], image.shape[0] - 1) - x = min(selection["x"], image.shape[1] - 1) + y = min(selection["y"], image.shape[ImageShape.Y] - 1) + x = min(selection["x"], image.shape[ImageShape.X] - 1) image = image[ y:y + selection["height"], x:x + selection["width"], ] return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image + @override def check_selected_region_exists(self, autosplit: AutoSplit): return bool(self.capture_device.isOpened()) diff --git a/src/capture_method/WindowsGraphicsCaptureMethod.py b/src/capture_method/WindowsGraphicsCaptureMethod.py index e38f7957..8ba0bb61 100644 --- a/src/capture_method/WindowsGraphicsCaptureMethod.py +++ b/src/capture_method/WindowsGraphicsCaptureMethod.py @@ -3,8 +3,9 @@ import asyncio from typing import TYPE_CHECKING, cast -import cv2 import numpy as np +from cv2.typing import MatLike +from typing_extensions import override from win32 import win32gui from winsdk.windows.graphics import 
SizeInt32 from winsdk.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession @@ -13,9 +14,10 @@ from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap from capture_method.CaptureMethodBase import CaptureMethodBase -from utils import RGBA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd +from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd if TYPE_CHECKING: + from AutoSplit import AutoSplit WGC_NO_BORDER_MIN_BUILD = 20348 @@ -39,7 +41,7 @@ class WindowsGraphicsCaptureMethod(CaptureMethodBase): frame_pool: Direct3D11CaptureFramePool | None = None session: GraphicsCaptureSession | None = None """This is stored to prevent session from being garbage collected""" - last_captured_frame: cv2.Mat | None = None + last_captured_frame: MatLike | None = None def __init__(self, autosplit: AutoSplit): super().__init__(autosplit) @@ -67,6 +69,7 @@ def __init__(self, autosplit: AutoSplit): self.size = item.size self.frame_pool = frame_pool + @override def close(self, autosplit: AutoSplit): if self.frame_pool: self.frame_pool.close() @@ -81,7 +84,8 @@ def close(self, autosplit: AutoSplit): pass self.session = None - def get_frame(self, autosplit: AutoSplit) -> tuple[cv2.Mat | None, bool]: + @override + def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: selection = autosplit.settings_dict["capture_region"] # We still need to check the hwnd because WGC will return a blank black image if not ( @@ -119,7 +123,7 @@ async def coroutine(): raise ValueError("Unable to obtain the BitmapBuffer from SoftwareBitmap.") reference = bitmap_buffer.create_reference() image = np.frombuffer(cast(bytes, reference), dtype=np.uint8) - image.shape = (self.size.height, self.size.width, RGBA_CHANNEL_COUNT) + image.shape = (self.size.height, self.size.width, BGRA_CHANNEL_COUNT) image = image[ selection["y"]:selection["y"] + 
selection["height"], selection["x"]:selection["x"] + selection["width"], @@ -127,14 +131,14 @@ async def coroutine(): self.last_captured_frame = image return image, False + @override def recover_window(self, captured_window_title: str, autosplit: AutoSplit): hwnd = win32gui.FindWindow(None, captured_window_title) if not is_valid_hwnd(hwnd): return False autosplit.hwnd = hwnd - self.close(autosplit) try: - self.__init__(autosplit) + self.reinitialize(autosplit) # Unrecordable hwnd found as the game is crashing except OSError as exception: if str(exception).endswith("The parameter is incorrect"): @@ -142,6 +146,7 @@ def recover_window(self, captured_window_title: str, autosplit: AutoSplit): raise return self.check_selected_region_exists(autosplit) + @override def check_selected_region_exists(self, autosplit: AutoSplit): return bool( is_valid_hwnd(autosplit.hwnd) diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py index 31c3f036..5d5cfb92 100644 --- a/src/capture_method/__init__.py +++ b/src/capture_method/__init__.py @@ -3,11 +3,12 @@ import asyncio from collections import OrderedDict from dataclasses import dataclass -from enum import Enum, EnumMeta, unique -from typing import TYPE_CHECKING, TypedDict, cast +from enum import Enum, EnumMeta, auto, unique +from typing import TYPE_CHECKING, NoReturn, TypedDict, cast from _ctypes import COMError from pygrabber.dshow_graph import FilterGraph +from typing_extensions import Never, override from capture_method.BitBltCaptureMethod import BitBltCaptureMethod from capture_method.CaptureMethodBase import CaptureMethodBase @@ -30,7 +31,8 @@ class Region(TypedDict): class CaptureMethodMeta(EnumMeta): # Allow checking if simple string is enum - def __contains__(self, other: str): + @override + def __contains__(self, other: object): try: self(other) except ValueError: @@ -39,26 +41,39 @@ def __contains__(self, other: str): @unique +# TODO: Try StrEnum in Python 3.11 class CaptureMethodEnum(Enum, 
metaclass=CaptureMethodMeta): # Allow TOML to save as a simple string + @override def __repr__(self): return self.value - __str__ = __repr__ - # Allow direct comparison with strings + @override def __eq__(self, other: object): - return self.value == other.__str__() - - # Restore hashing functionality + if isinstance(other, str): + return self.value == other + if isinstance(other, Enum): + return self.value == other.value + return other == self + + # Restore hashing functionality for use in Maps + @override def __hash__(self): return self.value.__hash__() + # https://github.com/python/typeshed/issues/10428 + @override + def _generate_next_value_( # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + name: str | CaptureMethodEnum, *_, # noqa: N805 + ): + return name + NONE = "" - BITBLT = "BITBLT" - WINDOWS_GRAPHICS_CAPTURE = "WINDOWS_GRAPHICS_CAPTURE" - PRINTWINDOW_RENDERFULLCONTENT = "PRINTWINDOW_RENDERFULLCONTENT" - DESKTOP_DUPLICATION = "DESKTOP_DUPLICATION" - VIDEO_CAPTURE_DEVICE = "VIDEO_CAPTURE_DEVICE" + BITBLT = auto() + WINDOWS_GRAPHICS_CAPTURE = auto() + PRINTWINDOW_RENDERFULLCONTENT = auto() + DESKTOP_DUPLICATION = auto() + VIDEO_CAPTURE_DEVICE = auto() class CaptureMethodDict(OrderedDict[CaptureMethodEnum, type[CaptureMethodBase]]): @@ -81,18 +96,24 @@ def get_method_by_index(self, index: int): return first(self) return list(self.keys())[index] - if TYPE_CHECKING: - __getitem__ = None # pyright: ignore[reportGeneralTypeIssues] # Disallow unsafe get + # Disallow unsafe get w/o breaking it at runtime + @override + def __getitem__( # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + self, + __key: Never, + ) -> NoReturn | type[CaptureMethodBase]: + return super().__getitem__(__key) - def get(self, __key: CaptureMethodEnum): + @override + def get(self, key: CaptureMethodEnum, __default: object = None): """ Returns the `CaptureMethodBase` subclass for `CaptureMethodEnum` if `CaptureMethodEnum` is available, 
else defaults to the first available `CaptureMethodEnum`. - Returns `CaptureMethodBase` (default) directly if there's no capture methods. + Returns `CaptureMethodBase` directly if there's no capture methods. """ - if __key == CaptureMethodEnum.NONE or len(self) <= 0: + if key == CaptureMethodEnum.NONE or len(self) <= 0: return CaptureMethodBase - return super().get(__key, first(self.values())) + return super().get(key, first(self.values())) CAPTURE_METHODS = CaptureMethodDict() @@ -126,7 +147,7 @@ def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: @dataclass -class CameraInfo(): +class CameraInfo: device_id: int name: str occupied: bool diff --git a/src/compare.py b/src/compare.py index 2e197a73..494381f4 100644 --- a/src/compare.py +++ b/src/compare.py @@ -4,18 +4,19 @@ import cv2 import imagehash +from cv2.typing import MatLike from PIL import Image -from utils import MAXBYTE, RGBA_CHANNEL_COUNT, ColorChannel, is_valid_image +from utils import BGRA_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image MAXRANGE = MAXBYTE + 1 -CHANNELS: list[int] = [ColorChannel.Red, ColorChannel.Green, ColorChannel.Blue] +CHANNELS = [ColorChannel.Red.value, ColorChannel.Green.value, ColorChannel.Blue.value] HISTOGRAM_SIZE = [8, 8, 8] RANGES = [0, MAXRANGE, 0, MAXRANGE, 0, MAXRANGE] MASK_SIZE_MULTIPLIER = ColorChannel.Alpha * MAXBYTE * MAXBYTE -def compare_histograms(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = None): +def compare_histograms(source: MatLike, capture: MatLike, mask: MatLike | None = None): """ Compares two images by calculating their histograms, normalizing them, and then comparing them using Bhattacharyya distance. 
@@ -34,7 +35,7 @@ def compare_histograms(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = return 1 - cv2.compareHist(source_hist, capture_hist, cv2.HISTCMP_BHATTACHARYYA) -def compare_l2_norm(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = None): +def compare_l2_norm(source: MatLike, capture: MatLike, mask: MatLike | None = None): """ Compares two images by calculating the L2 Error (square-root of sum of squared error) @param source: Image of any given shape @@ -54,7 +55,7 @@ def compare_l2_norm(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = No return 1 - (error / max_error) -def compare_template(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = None): +def compare_template(source: MatLike, capture: MatLike, mask: MatLike | None = None): """ Checks if the source is located within the capture by using the sum of square differences. The mask is used to search for non-rectangular images within the capture. @@ -77,7 +78,7 @@ def compare_template(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = N return 1 - (min_val / max_error) -def compare_phash(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = None): +def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None): """ Compares the Perceptual Hash of the two given images and returns the similarity between the two. 
@@ -101,9 +102,9 @@ def compare_phash(source: cv2.Mat, capture: cv2.Mat, mask: cv2.Mat | None = None return 1 - (hash_diff / 64.0) -def check_if_image_has_transparency(image: cv2.Mat): +def check_if_image_has_transparency(image: MatLike): # Check if there's a transparency channel (4th channel) and if at least one pixel is transparent (< 255) - if image.shape[2] != RGBA_CHANNEL_COUNT: + if image.shape[ImageShape.Channels] != BGRA_CHANNEL_COUNT: return False mean: float = image[:, :, ColorChannel.Alpha].mean() if mean == 0: diff --git a/src/error_messages.py b/src/error_messages.py index 63462140..3a44f023 100644 --- a/src/error_messages.py +++ b/src/error_messages.py @@ -75,13 +75,12 @@ def split_hotkey(): def pause_hotkey(): set_text_message( - "Your split image folder contains an image filename with a pause flag {p}, " - + "but no pause hotkey is set.", + "Your split image folder contains an image filename with a pause flag {p}, but no pause hotkey is set.", ) -def align_region_image_type(): - set_text_message("File not a valid image file") +def image_validity(image: str = "File"): + set_text_message(f"{image} not a valid image file") def alignment_not_matched(): @@ -97,7 +96,7 @@ def multiple_keyword_images(keyword: str): def reset_hotkey(): - set_text_message("Your split image folder contains a reset image, but no reset hotkey is set.") + set_text_message("Your split image folder contains a Reset Image, but no reset hotkey is set.") def old_version_settings_file(): @@ -133,8 +132,8 @@ def check_for_updates(): def load_start_image(): set_text_message( - "Start Image found, but cannot be loaded unless Start, Reset, and Pause hotkeys are set. " - + "Please set these hotkeys, and then click the Reload Start Image button.", + "Start Image found, but cannot be loaded unless Start hotkey is set. 
" + + "Please set the hotkey, and then click the Reload Start Image button.", ) diff --git a/src/hotkeys.py b/src/hotkeys.py index b0575467..2569cee9 100644 --- a/src/hotkeys.py +++ b/src/hotkeys.py @@ -20,8 +20,8 @@ PRESS_A_KEY_TEXT = "Press a key..." Commands = Literal["split", "start", "pause", "reset", "skip", "undo"] -Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "toggle_auto_reset_image"] -HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "toggle_auto_reset_image"] +Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] +HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] def remove_all_hotkeys(): @@ -50,9 +50,18 @@ def after_setting_hotkey(autosplit: AutoSplit): def send_command(autosplit: AutoSplit, command: Commands): + # Note: Rather than having the start image able to also reset the timer, + # having the reset image check be active at all time would be a better, more organic solution, + # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images. 
if autosplit.is_auto_controlled: + if command == "start" and autosplit.settings_dict["start_also_resets"]: + print("reset", flush=True) print(command, flush=True) - elif command in {"split", "start"}: + elif command == "start": + if autosplit.settings_dict["start_also_resets"]: + _send_hotkey(autosplit.settings_dict["reset_hotkey"]) + _send_hotkey(autosplit.settings_dict["split_hotkey"]) + elif command == "split": _send_hotkey(autosplit.settings_dict["split_hotkey"]) elif command == "pause": _send_hotkey(autosplit.settings_dict["pause_hotkey"]) @@ -64,7 +73,7 @@ def send_command(autosplit: AutoSplit, command: Commands): _send_hotkey(autosplit.settings_dict["undo_split_hotkey"]) else: - raise KeyError(f"{command!r} is not a valid LiveSplit.AutoSplitIntegration command") + raise KeyError(f"{command!r} is not a valid command") def _unhook(hotkey_callback: Callable[[], None] | None): @@ -82,8 +91,10 @@ def _send_hotkey(hotkey_or_scan_code: int | str | None): # Deal with regular inputs # If an int or does not contain the following strings - if isinstance(hotkey_or_scan_code, int) \ - or not ("num " in hotkey_or_scan_code or "decimal" in hotkey_or_scan_code or "+" in hotkey_or_scan_code): + if ( + isinstance(hotkey_or_scan_code, int) + or not any(key in hotkey_or_scan_code for key in ("num ", "decimal", "+")) + ): keyboard.send(hotkey_or_scan_code) return @@ -155,11 +166,14 @@ def __get_hotkey_name(names: list[str]): Uses keyboard.get_hotkey_name but works with non-english modifiers and keypad See: https://github.com/boppreh/keyboard/issues/516 . 
""" - def sorting_key(key: str): - return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) + if len(names) == 0: + return "" if len(names) == 1: return names[0] + + def sorting_key(key: str): + return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) clean_names = sorted(keyboard.get_hotkey_name(names).split("+"), key=sorting_key) # Replace the last key in hotkey_name with what we actually got as a last key_name # This ensures we keep proper keypad names @@ -176,6 +190,10 @@ def __read_hotkey(): keyboard_event = keyboard.read_event(True) # LiveSplit supports modifier keys as the last key, so any keyup means end of hotkey if keyboard_event.event_type == keyboard.KEY_UP: + # Unless keyup is also the very first event, + # which can happen from a very fast press at the same time we start reading + if len(names) == 0: + continue break key_name = __get_key_name(keyboard_event) # Ignore long presses @@ -242,14 +260,22 @@ def read_and_set_hotkey(): try: hotkey_name = preselected_hotkey_name if preselected_hotkey_name else __read_hotkey() + # Unset hotkey by pressing "Escape". 
This is the same behaviour as LiveSplit + if hotkey_name == "esc": + _unhook(getattr(autosplit, f"{hotkey}_hotkey")) + autosplit.settings_dict[f"{hotkey}_hotkey"] = "" # pyright: ignore[reportGeneralTypeIssues] + if autosplit.SettingsWidget: + getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") + return + if not is_valid_hotkey_name(hotkey_name): autosplit.show_error_signal.emit(lambda: error_messages.invalid_hotkey(hotkey_name)) return # Try to remove the previously set hotkey if there is one _unhook(getattr(autosplit, f"{hotkey}_hotkey")) - # Remove any hotkey using the same key combination + # Remove any hotkey using the same key combination __remove_key_already_set(autosplit, hotkey_name) action = __get_hotkey_action(autosplit, hotkey) diff --git a/src/menu_bar.py b/src/menu_bar.py index 19b6b549..176fd2c4 100644 --- a/src/menu_bar.py +++ b/src/menu_bar.py @@ -7,7 +7,11 @@ import requests from packaging.version import parse as version_parse from PySide6 import QtCore, QtWidgets +from PySide6.QtCore import Qt +from PySide6.QtGui import QBrush, QPalette +from PySide6.QtWidgets import QFileDialog from requests.exceptions import RequestException +from typing_extensions import override import error_messages import user_profile @@ -25,6 +29,8 @@ if TYPE_CHECKING: from AutoSplit import AutoSplit +HALF_BRIGHTNESS = 128 + class __AboutWidget(QtWidgets.QWidget, about.Ui_AboutAutoSplitWidget): # noqa: N801 # Private class """About Window.""" @@ -90,6 +96,7 @@ def __init__(self, autosplit: AutoSplit, check_on_open: bool): self.autosplit = autosplit self.check_on_open = check_on_open + @override def run(self): try: response = requests.get(f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", timeout=30) @@ -114,11 +121,59 @@ def check_for_updates(autosplit: AutoSplit, check_on_open: bool = False): class __SettingsWidget(QtWidgets.QWidget, settings_ui.Ui_SettingsWidget): # noqa: N801 # Private class - __video_capture_devices: list[CameraInfo] = [] 
- """ - Used to temporarily store the existing cameras, - we don't want to call `get_all_video_capture_devices` agains and possibly have a different result - """ + def __init__(self, autosplit: AutoSplit): + super().__init__() + self.__video_capture_devices: list[CameraInfo] = [] + """ + Used to temporarily store the existing cameras, + we don't want to call `get_all_video_capture_devices` agains and possibly have a different result + """ + + self.setupUi(self) + + # Fix Fusion Dark Theme's tabs content looking weird because it's using the button role + window_color = self.palette().color(QPalette.ColorRole.Window) + if window_color.red() < HALF_BRIGHTNESS: + brush = QBrush(window_color) + brush.setStyle(Qt.BrushStyle.SolidPattern) + palette = QPalette() + palette.setBrush(QPalette.ColorGroup.Active, QPalette.ColorRole.Button, brush) + palette.setBrush(QPalette.ColorGroup.Inactive, QPalette.ColorRole.Button, brush) + palette.setBrush(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Button, brush) + self.settings_tabs.setPalette(palette) + + self.autosplit = autosplit + self.__set_readme_link() + # Don't autofocus any particular field + self.setFocus() + + +# region Build the Capture method combobox + capture_method_values = CAPTURE_METHODS.values() + self.__set_all_capture_devices() + + # TODO: Word-wrapping works, but there's lots of extra padding to the right. 
Raise issue upstream + # list_view = QtWidgets.QListView() + # list_view.setWordWrap(True) + # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) + # list_view.setFixedWidth(self.capture_method_combobox.width()) + # self.capture_method_combobox.setView(list_view) + + self.capture_method_combobox.addItems([ + f"- {method.name} ({method.short_description})" + for method in capture_method_values + ]) + self.capture_method_combobox.setToolTip( + "\n\n".join([ + f"{method.name} :\n{method.description}" + for method in capture_method_values + ]), + ) +# endregion + + self.__setup_bindings() + + self.show() def __update_default_threshold(self, value: Any): self.__set_value("default_similarity_threshold", value) @@ -144,7 +199,8 @@ def get_capture_device_index(self, capture_device_id: int): return 0 def __enable_capture_device_if_its_selected_method( - self, selected_capture_method: str | CaptureMethodEnum | None = None, + self, + selected_capture_method: str | CaptureMethodEnum | None = None, ): if selected_capture_method is None: selected_capture_method = self.autosplit.settings_dict["capture_method"] @@ -210,38 +266,16 @@ def __set_readme_link(self): ) self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);") - def __init__(self, autosplit: AutoSplit): - super().__init__() - self.setupUi(self) - self.autosplit = autosplit - self.__set_readme_link() - # Don't autofocus any particular field - self.setFocus() - - -# region Build the Capture method combobox - capture_method_values = CAPTURE_METHODS.values() - self.__set_all_capture_devices() - - # TODO: Word-wrapping works, but there's lots of extra padding to the right. 
Raise issue upstream - # list_view = QtWidgets.QListView() - # list_view.setWordWrap(True) - # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) - # list_view.setFixedWidth(self.capture_method_combobox.width()) - # self.capture_method_combobox.setView(list_view) - - self.capture_method_combobox.addItems([ - f"- {method.name} ({method.short_description})" - for method in capture_method_values - ]) - self.capture_method_combobox.setToolTip( - "\n\n".join([ - f"{method.name} :\n{method.description}" - for method in capture_method_values - ]), + def __select_screenshot_directory(self): + self.autosplit.settings_dict["screenshot_directory"] = QFileDialog.getExistingDirectory( + self, + "Select Screenshots Directory", + self.autosplit.settings_dict["screenshot_directory"] + or self.autosplit.settings_dict["split_image_directory"], ) -# endregion + self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"]) + def __setup_bindings(self): # Hotkey initial values and bindings def hotkey_connect(hotkey: Hotkey): return lambda: set_hotkey(self.autosplit, hotkey) @@ -251,33 +285,38 @@ def hotkey_connect(hotkey: Hotkey): hotkey_input.setText( cast( str, - autosplit.settings_dict[f"{hotkey}_hotkey"], # pyright: ignore[reportGeneralTypeIssues] + self.autosplit.settings_dict[f"{hotkey}_hotkey"], # pyright: ignore[reportGeneralTypeIssues] ), ) set_hotkey_hotkey_button.clicked.connect(hotkey_connect(hotkey)) # Make it very clear that hotkeys are not used when auto-controlled - if autosplit.is_auto_controlled and hotkey != "toggle_auto_reset_image": + if self.autosplit.is_auto_controlled and hotkey != "toggle_auto_reset_image": set_hotkey_hotkey_button.setEnabled(False) hotkey_input.setEnabled(False) # region Set initial values # Capture Settings - self.fps_limit_spinbox.setValue(autosplit.settings_dict["fps_limit"]) - self.live_capture_region_checkbox.setChecked(autosplit.settings_dict["live_capture_region"]) + 
self.fps_limit_spinbox.setValue(self.autosplit.settings_dict["fps_limit"]) + self.live_capture_region_checkbox.setChecked(self.autosplit.settings_dict["live_capture_region"]) self.capture_method_combobox.setCurrentIndex( - CAPTURE_METHODS.get_index(autosplit.settings_dict["capture_method"]), + CAPTURE_METHODS.get_index(self.autosplit.settings_dict["capture_method"]), ) # No self.capture_device_combobox.setCurrentIndex # It'll set itself asynchronously in self.__set_all_capture_devices() + self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"]) + self.open_screenshot_checkbox.setChecked(self.autosplit.settings_dict["open_screenshot"]) # Image Settings - self.default_comparison_method.setCurrentIndex(autosplit.settings_dict["default_comparison_method"]) - self.default_similarity_threshold_spinbox.setValue(autosplit.settings_dict["default_similarity_threshold"]) - self.default_delay_time_spinbox.setValue(autosplit.settings_dict["default_delay_time"]) - self.default_pause_time_spinbox.setValue(autosplit.settings_dict["default_pause_time"]) - self.loop_splits_checkbox.setChecked(autosplit.settings_dict["loop_splits"]) - self.enable_auto_reset_image_checkbox.setChecked(autosplit.settings_dict["enable_auto_reset"]) + self.default_comparison_method_combobox.setCurrentIndex( + self.autosplit.settings_dict["default_comparison_method"], + ) + self.default_similarity_threshold_spinbox.setValue(self.autosplit.settings_dict["default_similarity_threshold"]) + self.default_delay_time_spinbox.setValue(self.autosplit.settings_dict["default_delay_time"]) + self.default_pause_time_spinbox.setValue(self.autosplit.settings_dict["default_pause_time"]) + self.loop_splits_checkbox.setChecked(self.autosplit.settings_dict["loop_splits"]) + self.start_also_resets_checkbox.setChecked(self.autosplit.settings_dict["start_also_resets"]) + self.enable_auto_reset_image_checkbox.setChecked(self.autosplit.settings_dict["enable_auto_reset"]) # endregion # region 
Binding # Capture Settings @@ -289,10 +328,16 @@ def hotkey_connect(hotkey: Hotkey): lambda: self.__set_value("capture_method", self.__capture_method_changed()), ) self.capture_device_combobox.currentIndexChanged.connect(self.__capture_device_changed) + self.screenshot_directory_browse_button.clicked.connect(self.__select_screenshot_directory) + self.open_screenshot_checkbox.stateChanged.connect( + lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()), + ) # Image Settings - self.default_comparison_method.currentIndexChanged.connect( - lambda: self.__set_value("default_comparison_method", self.default_comparison_method.currentIndex()), + self.default_comparison_method_combobox.currentIndexChanged.connect( + lambda: self.__set_value( + "default_comparison_method", self.default_comparison_method_combobox.currentIndex(), + ), ) self.default_similarity_threshold_spinbox.valueChanged.connect( lambda: self.__update_default_threshold(self.default_similarity_threshold_spinbox.value()), @@ -306,13 +351,14 @@ def hotkey_connect(hotkey: Hotkey): self.loop_splits_checkbox.stateChanged.connect( lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()), ) + self.start_also_resets_checkbox.stateChanged.connect( + lambda: self.__set_value("start_also_resets", self.start_also_resets_checkbox.isChecked()), + ) self.enable_auto_reset_image_checkbox.stateChanged.connect( lambda: self.__set_value("enable_auto_reset", self.enable_auto_reset_image_checkbox.isChecked()), ) # endregion - self.show() - def open_settings(autosplit: AutoSplit): if not autosplit.SettingsWidget or cast(QtWidgets.QWidget, autosplit.SettingsWidget).isHidden(): @@ -329,21 +375,25 @@ def get_default_settings_from_ui(autosplit: AutoSplit): "undo_split_hotkey": default_settings_dialog.undo_split_input.text(), "skip_split_hotkey": default_settings_dialog.skip_split_input.text(), "pause_hotkey": default_settings_dialog.pause_input.text(), + "screenshot_hotkey": 
default_settings_dialog.screenshot_input.text(), "toggle_auto_reset_image_hotkey": default_settings_dialog.toggle_auto_reset_image_input.text(), "fps_limit": default_settings_dialog.fps_limit_spinbox.value(), "live_capture_region": default_settings_dialog.live_capture_region_checkbox.isChecked(), - "enable_auto_reset": default_settings_dialog.enable_auto_reset_image_checkbox.isChecked(), "capture_method": CAPTURE_METHODS.get_method_by_index( default_settings_dialog.capture_method_combobox.currentIndex(), ), "capture_device_id": default_settings_dialog.capture_device_combobox.currentIndex(), "capture_device_name": "", - "default_comparison_method": default_settings_dialog.default_comparison_method.currentIndex(), + "default_comparison_method": default_settings_dialog.default_comparison_method_combobox.currentIndex(), "default_similarity_threshold": default_settings_dialog.default_similarity_threshold_spinbox.value(), "default_delay_time": default_settings_dialog.default_delay_time_spinbox.value(), "default_pause_time": default_settings_dialog.default_pause_time_spinbox.value(), "loop_splits": default_settings_dialog.loop_splits_checkbox.isChecked(), + "start_also_resets": default_settings_dialog.start_also_resets_checkbox.isChecked(), + "enable_auto_reset": default_settings_dialog.enable_auto_reset_image_checkbox.isChecked(), "split_image_directory": autosplit.split_image_folder_input.text(), + "screenshot_directory": default_settings_dialog.screenshot_directory_input.text(), + "open_screenshot": default_settings_dialog.open_screenshot_checkbox.isChecked(), "captured_window_title": "", "capture_region": { "x": autosplit.x_spinbox.value(), diff --git a/src/region_selection.py b/src/region_selection.py index 816f0976..0f1bec75 100644 --- a/src/region_selection.py +++ b/src/region_selection.py @@ -8,6 +8,7 @@ import cv2 import numpy as np +from cv2.typing import MatLike from PySide6 import QtCore, QtGui, QtWidgets from PySide6.QtTest import QTest from pywinctl import 
getTopWindowAt @@ -19,11 +20,21 @@ from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker import error_messages -from utils import MAXBYTE, RGB_CHANNEL_COUNT, ImageShape, get_window_bounds, is_valid_hwnd, is_valid_image +from utils import ( + BGR_CHANNEL_COUNT, + MAXBYTE, + ImageShape, + auto_split_directory, + get_window_bounds, + is_valid_hwnd, + is_valid_image, +) user32 = ctypes.windll.user32 + if TYPE_CHECKING: + from AutoSplit import AutoSplit ALIGN_REGION_THRESHOLD = 0.9 @@ -169,7 +180,7 @@ def align_region(autosplit: AutoSplit): template_filename = QtWidgets.QFileDialog.getOpenFileName( autosplit, "Select Reference Image", - "", + autosplit.settings_dict["split_image_directory"] or auto_split_directory, IMREAD_EXT_FILTER, )[0] @@ -177,11 +188,14 @@ def align_region(autosplit: AutoSplit): if not template_filename: return - template = cv2.imread(template_filename, cv2.IMREAD_COLOR) + template = cv2.imread(template_filename, cv2.IMREAD_UNCHANGED) + # Add alpha channel to template if it's missing. + if template.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: + template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA) # Validate template is a valid image file if not is_valid_image(template): - error_messages.align_region_image_type() + error_messages.image_validity() return # Obtaining the capture of a region which contains the @@ -222,7 +236,7 @@ def __set_region_values(autosplit: AutoSplit, left: int, top: int, width: int, h autosplit.height_spinbox.setValue(height) -def __test_alignment(capture: cv2.Mat, template: cv2.Mat): +def __test_alignment(capture: MatLike, template: MatLike): """ Obtain the best matching point for the template within the capture. This assumes that the template is actually smaller @@ -236,18 +250,13 @@ def __test_alignment(capture: cv2.Mat, template: cv2.Mat): best_width = 0 best_loc = (0, 0) - # Add alpha channel to template if it's missing. 
The cv2.matchTemplate() function - # needs both images to have the same color dimensions, and capture has an alpha channel - if template.shape[ImageShape.Channels] == RGB_CHANNEL_COUNT: - template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA) - # This tests 50 images scaled from 20% to 300% of the original template size for scale in np.linspace(0.2, 3, num=56): - width = int(template.shape[1] * scale) - height = int(template.shape[0] * scale) + width = int(template.shape[ImageShape.X] * scale) + height = int(template.shape[ImageShape.Y] * scale) # The template can not be larger than the capture - if width > capture.shape[1] or height > capture.shape[0]: + if width > capture.shape[ImageShape.X] or height > capture.shape[ImageShape.Y]: continue resized = cv2.resize(template, (width, height), interpolation=cv2.INTER_NEAREST) diff --git a/src/split_parser.py b/src/split_parser.py index 250c866d..070c9838 100644 --- a/src/split_parser.py +++ b/src/split_parser.py @@ -1,6 +1,7 @@ from __future__ import annotations import os +from collections.abc import Callable from typing import TYPE_CHECKING, TypeVar import error_messages @@ -31,8 +32,8 @@ def __value_from_filename( if len(delimiters) != 2: # noqa: PLR2004 raise ValueError("delimiters parameter must contain exactly 2 characters") try: - value_type = type(default_value) - value = value_type(filename.split(delimiters[0], 1)[1].split(delimiters[1])[0]) + string_value = filename.split(delimiters[0], 1)[1].split(delimiters[1])[0] + value: T = type(default_value)(string_value) except (IndexError, ValueError): return default_value else: @@ -182,42 +183,67 @@ def parse_and_validate_images(autosplit: AutoSplit): ] # Find non-split images and then remove them from the list - autosplit.start_image = __pop_image_type(all_images, ImageType.START) - autosplit.reset_image = __pop_image_type(all_images, ImageType.RESET) - autosplit.split_images = all_images + start_image = __pop_image_type(all_images, ImageType.START) + reset_image = 
__pop_image_type(all_images, ImageType.RESET) + split_images = all_images + + error_message: Callable[[], object] | None = None + + # If there is no start hotkey set but a Start Image is present, and is not auto controlled, throw an error. + if ( + start_image + and not autosplit.settings_dict["split_hotkey"] + and not autosplit.is_auto_controlled + ): + error_message = error_messages.load_start_image + + # If there is no reset hotkey set but a Reset Image is present, and is not auto controlled, throw an error. + elif ( + reset_image + and not autosplit.settings_dict["reset_hotkey"] + and not autosplit.is_auto_controlled + ): + error_message = error_messages.reset_hotkey # Make sure that each of the images follows the guidelines for correct format # according to all of the settings selected by the user. - for image in autosplit.split_images: - # Test for image without transparency - if not is_valid_image(image.byte_array): - autosplit.gui_changes_on_reset() - return False - - # error out if there is a {p} flag but no pause hotkey set and is not auto controlled. - if ( - not autosplit.settings_dict["pause_hotkey"] - and image.check_flag(PAUSE_FLAG) - and not autosplit.is_auto_controlled - ): - autosplit.gui_changes_on_reset() - error_messages.pause_hotkey() - return False - - # Check that there's only one reset image - if image.image_type == ImageType.RESET: - # If there is no reset hotkey set but a reset image is present, and is not auto controlled, throw an error. 
- if not autosplit.settings_dict["reset_hotkey"] and not autosplit.is_auto_controlled: - autosplit.gui_changes_on_reset() - error_messages.reset_hotkey() - return False - autosplit.gui_changes_on_reset() - error_messages.multiple_keyword_images(RESET_KEYWORD) - return False - - # Check that there's only one start image - if image.image_type == ImageType.START: - autosplit.gui_changes_on_reset() - error_messages.multiple_keyword_images(START_KEYWORD) - return False + else: + for image in split_images: + # Test for image without transparency + if not is_valid_image(image.byte_array): + def image_validity(filename: str): + return lambda: error_messages.image_validity(filename) + error_message = image_validity(image.filename) + break + + # error out if there is a {p} flag but no pause hotkey set and is not auto controlled. + if ( + not autosplit.settings_dict["pause_hotkey"] + and image.check_flag(PAUSE_FLAG) + and not autosplit.is_auto_controlled + ): + error_message = error_messages.pause_hotkey + break + + # Check that there's only one Reset Image + if image.image_type == ImageType.RESET: + error_message = lambda: error_messages.multiple_keyword_images(RESET_KEYWORD) # noqa: E731 + break + + # Check that there's only one Start Image + if image.image_type == ImageType.START: + error_message = lambda: error_messages.multiple_keyword_images(START_KEYWORD) # noqa: E731 + break + + if error_message: + autosplit.start_image = None + autosplit.reset_image = None + autosplit.split_images = [] + autosplit.gui_changes_on_reset() + error_message() + return False + + autosplit.start_image = start_image + autosplit.reset_image = reset_image + autosplit.split_images = split_images return True diff --git a/src/user_profile.py b/src/user_profile.py index 8cf5075f..25d93251 100644 --- a/src/user_profile.py +++ b/src/user_profile.py @@ -22,10 +22,10 @@ class UserProfileDict(TypedDict): undo_split_hotkey: str skip_split_hotkey: str pause_hotkey: str + screenshot_hotkey: str 
toggle_auto_reset_image_hotkey: str fps_limit: int live_capture_region: bool - enable_auto_reset: bool capture_method: str | CaptureMethodEnum capture_device_id: int capture_device_name: str @@ -34,7 +34,11 @@ class UserProfileDict(TypedDict): default_delay_time: int default_pause_time: float loop_splits: bool + start_also_resets: bool + enable_auto_reset: bool split_image_directory: str + screenshot_directory: str + open_screenshot: bool captured_window_title: str capture_region: Region @@ -45,10 +49,10 @@ class UserProfileDict(TypedDict): undo_split_hotkey="", skip_split_hotkey="", pause_hotkey="", + screenshot_hotkey="", toggle_auto_reset_image_hotkey="", fps_limit=60, live_capture_region=True, - enable_auto_reset=True, capture_method=CAPTURE_METHODS.get_method_by_index(0), capture_device_id=0, capture_device_name="", @@ -57,7 +61,11 @@ class UserProfileDict(TypedDict): default_delay_time=0, default_pause_time=10, loop_splits=False, + start_also_resets=False, + enable_auto_reset=True, split_image_directory="", + screenshot_directory="", + open_screenshot=True, captured_window_title="", capture_region=Region(x=0, y=0, width=1, height=1), ) @@ -109,13 +117,14 @@ def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str # Casting here just so we can build an actual UserProfileDict once we're done validating # Fallback to default settings if some are missing from the file. This happens when new settings are added. loaded_settings = cast( - UserProfileDict, { + UserProfileDict, + { **DEFAULT_PROFILE, **toml.load(file), }, ) # TODO: Data Validation / fallbacks ? 
- autosplit.settings_dict = UserProfileDict(**loaded_settings) + autosplit.settings_dict = UserProfileDict(**loaded_settings) # type: ignore[misc] autosplit.last_loaded_settings = autosplit.settings_dict autosplit.x_spinbox.setValue(autosplit.settings_dict["capture_region"]["x"]) @@ -130,12 +139,9 @@ def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str remove_all_hotkeys() if not autosplit.is_auto_controlled: for hotkey, hotkey_name in [(hotkey, f"{hotkey}_hotkey") for hotkey in HOTKEYS]: - if autosplit.settings_dict[hotkey_name]: # pyright: ignore[reportGeneralTypeIssues] - set_hotkey( - autosplit, - hotkey, - cast(str, autosplit.settings_dict[hotkey_name]), # pyright: ignore[reportGeneralTypeIssues] - ) + hotkey_value = cast(str, autosplit.settings_dict[hotkey_name]) # pyright: ignore[reportGeneralTypeIssues] + if hotkey_value: + set_hotkey(autosplit, hotkey, hotkey_value) change_capture_method(cast(CaptureMethodEnum, autosplit.settings_dict["capture_method"]), autosplit) if autosplit.settings_dict["capture_method"] != CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: @@ -192,12 +198,15 @@ def load_check_for_updates_on_open(autosplit: AutoSplit): Retrieve the "Check For Updates On Open" QSettings and set the checkbox state These are only global settings values. They are not *toml settings values. 
""" - value = QtCore \ - .QSettings("AutoSplit", "Check For Updates On Open") \ - .value("check_for_updates_on_open", True, type=bool) # Type not infered by PySide6 # TODO: Report this issue upstream - autosplit.action_check_for_updates_on_open.setChecked(value) # pyright: ignore[reportGeneralTypeIssues] + value = cast( + bool, + QtCore + .QSettings("AutoSplit", "Check For Updates On Open") + .value("check_for_updates_on_open", True, type=bool), + ) + autosplit.action_check_for_updates_on_open.setChecked(value) def set_check_for_updates_on_open(design_window: design.Ui_MainWindow, value: bool): diff --git a/src/utils.py b/src/utils.py index de827fb2..f1360b26 100644 --- a/src/utils.py +++ b/src/utils.py @@ -5,14 +5,14 @@ import ctypes.wintypes import os import sys -from collections.abc import Callable, Iterable +from collections.abc import Callable, Generator, Iterable from enum import IntEnum from platform import version from threading import Thread from typing import TYPE_CHECKING, Any, TypeVar, cast -import cv2 import win32ui +from cv2.typing import MatLike from typing_extensions import TypeGuard from win32 import win32gui from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind @@ -24,24 +24,27 @@ # Source does not exist, keep this under TYPE_CHECKING from _win32typing import PyCDC # pyright: ignore[reportMissingModuleSource] +_T = TypeVar("_T") + + DWMWA_EXTENDED_FRAME_BOUNDS = 9 MAXBYTE = 255 -RGB_CHANNEL_COUNT = 3 -"""How many channels in an RGB image""" -RGBA_CHANNEL_COUNT = 4 +BGR_CHANNEL_COUNT = 3 """How many channels in an RGB image""" +BGRA_CHANNEL_COUNT = 4 +"""How many channels in an RGBA image""" class ImageShape(IntEnum): - X = 0 - Y = 1 + Y = 0 + X = 1 Channels = 2 class ColorChannel(IntEnum): - Red = 0 + Blue = 0 Green = 1 - Blue = 2 + Red = 2 Alpha = 3 @@ -60,16 +63,15 @@ def is_digit(value: str | int | None): return False -def is_valid_image(image: cv2.Mat | None) -> TypeGuard[cv2.Mat]: +def 
is_valid_image(image: MatLike | None) -> TypeGuard[MatLike]: return image is not None and bool(image.size) -def is_valid_hwnd(hwnd: int): +def is_valid_hwnd(hwnd: int) -> bool: """Validate the hwnd points to a valid window and not the desktop or whatever window obtained with `""`.""" if not hwnd: return False if sys.platform == "win32": - # TODO: Fix stubs, IsWindow should return a boolean return bool(win32gui.IsWindow(hwnd) and win32gui.GetWindowText(hwnd)) return True @@ -126,9 +128,9 @@ def get_direct3d_device(): async def init_mediacapture(): await (media_capture.initialize_async() or asyncio.sleep(0)) + asyncio.run(init_mediacapture()) - direct_3d_device = media_capture.media_capture_settings and \ - media_capture.media_capture_settings.direct3_d11_device + direct_3d_device = media_capture.media_capture_settings and media_capture.media_capture_settings.direct3_d11_device if not direct_3d_device: try: # May be problematic? https://github.com/pywinrt/python-winsdk/issues/11#issuecomment-1315345318 @@ -157,6 +159,7 @@ def fire_and_forget(func: Callable[..., Any]): Uses asyncio on Linux because of a `Segmentation fault (core dumped)` """ + def wrapped(*args: Any, **kwargs: Any): if sys.platform == "win32": thread = Thread(target=func, args=args, kwargs=kwargs) @@ -167,6 +170,14 @@ def wrapped(*args: Any, **kwargs: Any): return wrapped +def flatten(nested_iterable: Iterable[Iterable[_T]]) -> Generator[_T, None, None]: + return ( + item for flatten + in nested_iterable + for item in flatten + ) + + # Environment specifics WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1 FIRST_WIN_11_BUILD = 22000 diff --git a/typings/cv2/Error.pyi b/typings/cv2/Error.pyi deleted file mode 100644 index 40853539..00000000 --- a/typings/cv2/Error.pyi +++ /dev/null @@ -1,110 +0,0 @@ -BAD_ALIGN: int -BAD_ALPHA_CHANNEL: int -BAD_CALL_BACK: int -BAD_COI: int -BAD_DATA_PTR: int -BAD_DEPTH: int -BAD_IMAGE_SIZE: int -BAD_MODEL_OR_CH_SEQ: int 
-BAD_NUM_CHANNEL1U: int -BAD_NUM_CHANNELS: int -BAD_OFFSET: int -BAD_ORDER: int -BAD_ORIGIN: int -BAD_ROISIZE: int -BAD_STEP: int -BAD_TILE_SIZE: int -BadAlign: int -BadAlphaChannel: int -BadCOI: int -BadCallBack: int -BadDataPtr: int -BadDepth: int -BadImageSize: int -BadModelOrChSeq: int -BadNumChannel1U: int -BadNumChannels: int -BadOffset: int -BadOrder: int -BadOrigin: int -BadROISize: int -BadStep: int -BadTileSize: int -GPU_API_CALL_ERROR: int -GPU_NOT_SUPPORTED: int -GpuApiCallError: int -GpuNotSupported: int -HEADER_IS_NULL: int -HeaderIsNull: int -MASK_IS_TILED: int -MaskIsTiled: int -OPEN_CLAPI_CALL_ERROR: int -OPEN_CLDOUBLE_NOT_SUPPORTED: int -OPEN_CLINIT_ERROR: int -OPEN_CLNO_AMDBLAS_FFT: int -OPEN_GL_API_CALL_ERROR: int -OPEN_GL_NOT_SUPPORTED: int -OpenCLApiCallError: int -OpenCLDoubleNotSupported: int -OpenCLInitError: int -OpenCLNoAMDBlasFft: int -OpenGlApiCallError: int -OpenGlNotSupported: int -STS_ASSERT: int -STS_AUTO_TRACE: int -STS_BACK_TRACE: int -STS_BAD_ARG: int -STS_BAD_FLAG: int -STS_BAD_FUNC: int -STS_BAD_MASK: int -STS_BAD_MEM_BLOCK: int -STS_BAD_POINT: int -STS_BAD_SIZE: int -STS_DIV_BY_ZERO: int -STS_ERROR: int -STS_FILTER_OFFSET_ERR: int -STS_FILTER_STRUCT_CONTENT_ERR: int -STS_INPLACE_NOT_SUPPORTED: int -STS_INTERNAL: int -STS_KERNEL_STRUCT_CONTENT_ERR: int -STS_NOT_IMPLEMENTED: int -STS_NO_CONV: int -STS_NO_MEM: int -STS_NULL_PTR: int -STS_OBJECT_NOT_FOUND: int -STS_OK: int -STS_OUT_OF_RANGE: int -STS_PARSE_ERROR: int -STS_UNMATCHED_FORMATS: int -STS_UNMATCHED_SIZES: int -STS_UNSUPPORTED_FORMAT: int -STS_VEC_LENGTH_ERR: int -StsAssert: int -StsAutoTrace: int -StsBackTrace: int -StsBadArg: int -StsBadFlag: int -StsBadFunc: int -StsBadMask: int -StsBadMemBlock: int -StsBadPoint: int -StsBadSize: int -StsDivByZero: int -StsError: int -StsFilterOffsetErr: int -StsFilterStructContentErr: int -StsInplaceNotSupported: int -StsInternal: int -StsKernelStructContentErr: int -StsNoConv: int -StsNoMem: int -StsNotImplemented: int -StsNullPtr: 
int -StsObjectNotFound: int -StsOk: int -StsOutOfRange: int -StsParseError: int -StsUnmatchedFormats: int -StsUnmatchedSizes: int -StsUnsupportedFormat: int -StsVecLengthErr: int diff --git a/typings/cv2/__init__.pyi b/typings/cv2/__init__.pyi index 30881037..4fa2b158 100644 --- a/typings/cv2/__init__.pyi +++ b/typings/cv2/__init__.pyi @@ -1,39 +1,40 @@ import typing +import cv2.aruco import cv2.cuda import cv2.gapi import cv2.gapi.streaming import cv2.typing -import numpy # noqa: ICN001 +import numpy from cv2 import ( Error as Error, - data as data, + aruco as aruco, + barcode as barcode, + cuda as cuda, + detail as detail, + dnn as dnn, + fisheye as fisheye, + flann as flann, gapi as gapi, - mat_wrapper as mat_wrapper, - misc as misc, + ipp as ipp, + ml as ml, + ocl as ocl, + ogl as ogl, + parallel as parallel, + samples as samples, + segmentation as segmentation, utils as utils, - version as version, + videoio_registry as videoio_registry, ) -from typing_extensions import TypeAlias - -Mat: TypeAlias = cv2.typing.MatLike - - -class error(Exception): - code: typing.ClassVar[int] - err: typing.ClassVar[str] - file: typing.ClassVar[str] - func: typing.ClassVar[str] - line: typing.ClassVar[int] - msg: typing.ClassVar[str] - +from cv2.mat_wrapper import Mat as Mat # Enumerations SORT_EVERY_ROW: int SORT_EVERY_COLUMN: int SORT_ASCENDING: int SORT_DESCENDING: int -SortFlags = int # One of [SORT_EVERY_ROW, SORT_EVERY_COLUMN, SORT_ASCENDING, SORT_DESCENDING] +SortFlags = int +"""One of [SORT_EVERY_ROW, SORT_EVERY_COLUMN, SORT_ASCENDING, SORT_DESCENDING]""" COVAR_SCRAMBLED: int COVAR_NORMAL: int @@ -41,39 +42,58 @@ COVAR_USE_AVG: int COVAR_SCALE: int COVAR_ROWS: int COVAR_COLS: int -CovarFlags = int # One of [COVAR_SCRAMBLED, COVAR_NORMAL, COVAR_USE_AVG, COVAR_SCALE, COVAR_ROWS, COVAR_COLS] +CovarFlags = int +"""One of [COVAR_SCRAMBLED, COVAR_NORMAL, COVAR_USE_AVG, COVAR_SCALE, COVAR_ROWS, COVAR_COLS]""" KMEANS_RANDOM_CENTERS: int KMEANS_PP_CENTERS: int 
KMEANS_USE_INITIAL_LABELS: int -KmeansFlags = int # One of [KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS, KMEANS_USE_INITIAL_LABELS] +KmeansFlags = int +"""One of [KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS, KMEANS_USE_INITIAL_LABELS]""" REDUCE_SUM: int REDUCE_AVG: int REDUCE_MAX: int REDUCE_MIN: int REDUCE_SUM2: int -ReduceTypes = int # One of [REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN, REDUCE_SUM2] +ReduceTypes = int +"""One of [REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN, REDUCE_SUM2]""" ROTATE_90_CLOCKWISE: int ROTATE_180: int ROTATE_90_COUNTERCLOCKWISE: int -RotateFlags = int # One of [ROTATE_90_CLOCKWISE, ROTATE_180, ROTATE_90_COUNTERCLOCKWISE] +RotateFlags = int +"""One of [ROTATE_90_CLOCKWISE, ROTATE_180, ROTATE_90_COUNTERCLOCKWISE]""" Param_INT: int +PARAM_INT: int Param_BOOLEAN: int +PARAM_BOOLEAN: int Param_REAL: int +PARAM_REAL: int Param_STRING: int +PARAM_STRING: int Param_MAT: int +PARAM_MAT: int Param_MAT_VECTOR: int +PARAM_MAT_VECTOR: int Param_ALGORITHM: int +PARAM_ALGORITHM: int Param_FLOAT: int +PARAM_FLOAT: int Param_UNSIGNED_INT: int +PARAM_UNSIGNED_INT: int Param_UINT64: int +PARAM_UINT64: int Param_UCHAR: int +PARAM_UCHAR: int Param_SCALAR: int -# One of [INT, BOOLEAN, REAL, STRING, MAT, MAT_VECTOR, ALGORITHM, FLOAT, UNSIGNED_INT, UINT64, UCHAR, SCALAR] +PARAM_SCALAR: int Param = int +"""One of [Param_INT, PARAM_INT, Param_BOOLEAN, PARAM_BOOLEAN, Param_REAL, PARAM_REAL, Param_STRING, PARAM_STRING, +Param_MAT, PARAM_MAT, Param_MAT_VECTOR, PARAM_MAT_VECTOR, Param_ALGORITHM, PARAM_ALGORITHM, Param_FLOAT, PARAM_FLOAT, +Param_UNSIGNED_INT, PARAM_UNSIGNED_INT, Param_UINT64, PARAM_UINT64, Param_UCHAR, PARAM_UCHAR, Param_SCALAR, +PARAM_SCALAR]""" DECOMP_LU: int DECOMP_SVD: int @@ -81,7 +101,8 @@ DECOMP_EIG: int DECOMP_CHOLESKY: int DECOMP_QR: int DECOMP_NORMAL: int -DecompTypes = int # One of [DECOMP_LU, DECOMP_SVD, DECOMP_EIG, DECOMP_CHOLESKY, DECOMP_QR, DECOMP_NORMAL] +DecompTypes = int +"""One of [DECOMP_LU, DECOMP_SVD, DECOMP_EIG, 
DECOMP_CHOLESKY, DECOMP_QR, DECOMP_NORMAL]""" NORM_INF: int NORM_L1: int @@ -92,9 +113,9 @@ NORM_HAMMING2: int NORM_TYPE_MASK: int NORM_RELATIVE: int NORM_MINMAX: int -# One of [NORM_INF, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, -# NORM_HAMMING2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX] NormTypes = int +"""One of [NORM_INF, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, NORM_HAMMING2, NORM_TYPE_MASK, NORM_RELATIVE, +NORM_MINMAX]""" CMP_EQ: int CMP_GT: int @@ -102,12 +123,14 @@ CMP_GE: int CMP_LT: int CMP_LE: int CMP_NE: int -CmpTypes = int # One of [CMP_EQ, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE] +CmpTypes = int +"""One of [CMP_EQ, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE]""" GEMM_1_T: int GEMM_2_T: int GEMM_3_T: int -GemmFlags = int # One of [GEMM_1_T, GEMM_2_T, GEMM_3_T] +GemmFlags = int +"""One of [GEMM_1_T, GEMM_2_T, GEMM_3_T]""" DFT_INVERSE: int DFT_SCALE: int @@ -117,9 +140,9 @@ DFT_REAL_OUTPUT: int DFT_COMPLEX_INPUT: int DCT_INVERSE: int DCT_ROWS: int -# One of [DFT_INVERSE, DFT_SCALE, DFT_ROWS, DFT_COMPLEX_OUTPUT, -# DFT_REAL_OUTPUT, DFT_COMPLEX_INPUT, DCT_INVERSE, DCT_ROWS] DftFlags = int +"""One of [DFT_INVERSE, DFT_SCALE, DFT_ROWS, DFT_COMPLEX_OUTPUT, DFT_REAL_OUTPUT, DFT_COMPLEX_INPUT, DCT_INVERSE, +DCT_ROWS]""" BORDER_CONSTANT: int BORDER_REPLICATE: int @@ -130,40 +153,43 @@ BORDER_TRANSPARENT: int BORDER_REFLECT101: int BORDER_DEFAULT: int BORDER_ISOLATED: int -# One of [BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, -# BORDER_REFLECT_101, BORDER_TRANSPARENT, BORDER_REFLECT101, -# BORDER_DEFAULT, BORDER_ISOLATED] BorderTypes = int +"""One of [BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, BORDER_REFLECT_101, BORDER_TRANSPARENT, +BORDER_REFLECT101, BORDER_DEFAULT, BORDER_ISOLATED]""" ACCESS_READ: int ACCESS_WRITE: int ACCESS_RW: int ACCESS_MASK: int ACCESS_FAST: int -AccessFlag = int # One of [ACCESS_READ, ACCESS_WRITE, ACCESS_RW, ACCESS_MASK, ACCESS_FAST] +AccessFlag = int +"""One of [ACCESS_READ, ACCESS_WRITE, 
ACCESS_RW, ACCESS_MASK, ACCESS_FAST]""" USAGE_DEFAULT: int USAGE_ALLOCATE_HOST_MEMORY: int USAGE_ALLOCATE_DEVICE_MEMORY: int USAGE_ALLOCATE_SHARED_MEMORY: int __UMAT_USAGE_FLAGS_32BIT: int -# One of [USAGE_DEFAULT, USAGE_ALLOCATE_HOST_MEMORY, -# USAGE_ALLOCATE_DEVICE_MEMORY, USAGE_ALLOCATE_SHARED_MEMORY, -# __UMAT_USAGE_FLAGS_32BIT] UMatUsageFlags = int +"""One of [USAGE_DEFAULT, USAGE_ALLOCATE_HOST_MEMORY, USAGE_ALLOCATE_DEVICE_MEMORY, USAGE_ALLOCATE_SHARED_MEMORY, +__UMAT_USAGE_FLAGS_32BIT]""" +SOLVELP_LOST: int SOLVELP_UNBOUNDED: int SOLVELP_UNFEASIBLE: int SOLVELP_SINGLE: int SOLVELP_MULTI: int -SolveLPResult = int # One of [SOLVELP_UNBOUNDED, SOLVELP_UNFEASIBLE, SOLVELP_SINGLE, SOLVELP_MULTI] +SolveLPResult = int +"""One of [SOLVELP_LOST, SOLVELP_UNBOUNDED, SOLVELP_UNFEASIBLE, SOLVELP_SINGLE, SOLVELP_MULTI]""" QUAT_ASSUME_NOT_UNIT: int QUAT_ASSUME_UNIT: int -QuatAssumeType = int # One of [QUAT_ASSUME_NOT_UNIT, QUAT_ASSUME_UNIT] +QuatAssumeType = int +"""One of [QUAT_ASSUME_NOT_UNIT, QUAT_ASSUME_UNIT]""" FILTER_SCHARR: int -SpecialFilter = int # One of [FILTER_SCHARR] +SpecialFilter = int +"""One of [FILTER_SCHARR]""" MORPH_ERODE: int MORPH_DILATE: int @@ -173,14 +199,15 @@ MORPH_GRADIENT: int MORPH_TOPHAT: int MORPH_BLACKHAT: int MORPH_HITMISS: int -# One of [MORPH_ERODE, MORPH_DILATE, MORPH_OPEN, MORPH_CLOSE, -# MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT, MORPH_HITMISS] MorphTypes = int +"""One of [MORPH_ERODE, MORPH_DILATE, MORPH_OPEN, MORPH_CLOSE, MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT, +MORPH_HITMISS]""" MORPH_RECT: int MORPH_CROSS: int MORPH_ELLIPSE: int -MorphShapes = int # One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE] +MorphShapes = int +"""One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE]""" INTER_NEAREST: int INTER_LINEAR: int @@ -192,20 +219,21 @@ INTER_NEAREST_EXACT: int INTER_MAX: int WARP_FILL_OUTLIERS: int WARP_INVERSE_MAP: int -# One of [INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, -# INTER_LANCZOS4, INTER_LINEAR_EXACT, 
INTER_NEAREST_EXACT, INTER_MAX, -# WARP_FILL_OUTLIERS, WARP_INVERSE_MAP] InterpolationFlags = int +"""One of [INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, INTER_LANCZOS4, INTER_LINEAR_EXACT, +INTER_NEAREST_EXACT, INTER_MAX, WARP_FILL_OUTLIERS, WARP_INVERSE_MAP]""" WARP_POLAR_LINEAR: int WARP_POLAR_LOG: int -WarpPolarMode = int # One of [WARP_POLAR_LINEAR, WARP_POLAR_LOG] +WarpPolarMode = int +"""One of [WARP_POLAR_LINEAR, WARP_POLAR_LOG]""" INTER_BITS: int INTER_BITS2: int INTER_TAB_SIZE: int INTER_TAB_SIZE2: int -InterpolationMasks = int # One of [INTER_BITS, INTER_BITS2, INTER_TAB_SIZE, INTER_TAB_SIZE2] +InterpolationMasks = int +"""One of [INTER_BITS, INTER_BITS2, INTER_TAB_SIZE, INTER_TAB_SIZE2]""" DIST_USER: int DIST_L1: int @@ -215,12 +243,14 @@ DIST_L12: int DIST_FAIR: int DIST_WELSCH: int DIST_HUBER: int -DistanceTypes = int # One of [DIST_USER, DIST_L1, DIST_L2, DIST_C, DIST_L12, DIST_FAIR, DIST_WELSCH, DIST_HUBER] +DistanceTypes = int +"""One of [DIST_USER, DIST_L1, DIST_L2, DIST_C, DIST_L12, DIST_FAIR, DIST_WELSCH, DIST_HUBER]""" DIST_MASK_3: int DIST_MASK_5: int DIST_MASK_PRECISE: int -DistanceTransformMasks = int # One of [DIST_MASK_3, DIST_MASK_5, DIST_MASK_PRECISE] +DistanceTransformMasks = int +"""One of [DIST_MASK_3, DIST_MASK_5, DIST_MASK_PRECISE]""" THRESH_BINARY: int THRESH_BINARY_INV: int @@ -230,33 +260,38 @@ THRESH_TOZERO_INV: int THRESH_MASK: int THRESH_OTSU: int THRESH_TRIANGLE: int -# One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, -# THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, THRESH_TRIANGLE] ThresholdTypes = int +"""One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, +THRESH_TRIANGLE]""" ADAPTIVE_THRESH_MEAN_C: int ADAPTIVE_THRESH_GAUSSIAN_C: int -AdaptiveThresholdTypes = int # One of [ADAPTIVE_THRESH_MEAN_C, ADAPTIVE_THRESH_GAUSSIAN_C] +AdaptiveThresholdTypes = int +"""One of [ADAPTIVE_THRESH_MEAN_C, ADAPTIVE_THRESH_GAUSSIAN_C]""" GC_BGD: 
int GC_FGD: int GC_PR_BGD: int GC_PR_FGD: int -GrabCutClasses = int # One of [GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD] +GrabCutClasses = int +"""One of [GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD]""" GC_INIT_WITH_RECT: int GC_INIT_WITH_MASK: int GC_EVAL: int GC_EVAL_FREEZE_MODEL: int -GrabCutModes = int # One of [GC_INIT_WITH_RECT, GC_INIT_WITH_MASK, GC_EVAL, GC_EVAL_FREEZE_MODEL] +GrabCutModes = int +"""One of [GC_INIT_WITH_RECT, GC_INIT_WITH_MASK, GC_EVAL, GC_EVAL_FREEZE_MODEL]""" DIST_LABEL_CCOMP: int DIST_LABEL_PIXEL: int -DistanceTransformLabelTypes = int # One of [DIST_LABEL_CCOMP, DIST_LABEL_PIXEL] +DistanceTransformLabelTypes = int +"""One of [DIST_LABEL_CCOMP, DIST_LABEL_PIXEL]""" FLOODFILL_FIXED_RANGE: int FLOODFILL_MASK_ONLY: int -FloodFillFlags = int # One of [FLOODFILL_FIXED_RANGE, FLOODFILL_MASK_ONLY] +FloodFillFlags = int +"""One of [FLOODFILL_FIXED_RANGE, FLOODFILL_MASK_ONLY]""" CC_STAT_LEFT: int CC_STAT_TOP: int @@ -264,8 +299,8 @@ CC_STAT_WIDTH: int CC_STAT_HEIGHT: int CC_STAT_AREA: int CC_STAT_MAX: int -# One of [CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH, CC_STAT_HEIGHT, CC_STAT_AREA, CC_STAT_MAX] ConnectedComponentsTypes = int +"""One of [CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH, CC_STAT_HEIGHT, CC_STAT_AREA, CC_STAT_MAX]""" CCL_DEFAULT: int CCL_WU: int @@ -274,39 +309,43 @@ CCL_BOLELLI: int CCL_SAUF: int CCL_BBDT: int CCL_SPAGHETTI: int -# One of [CCL_DEFAULT, CCL_WU, CCL_GRANA, CCL_BOLELLI, CCL_SAUF, CCL_BBDT, CCL_SPAGHETTI] ConnectedComponentsAlgorithmsTypes = int +"""One of [CCL_DEFAULT, CCL_WU, CCL_GRANA, CCL_BOLELLI, CCL_SAUF, CCL_BBDT, CCL_SPAGHETTI]""" RETR_EXTERNAL: int RETR_LIST: int RETR_CCOMP: int RETR_TREE: int RETR_FLOODFILL: int -RetrievalModes = int # One of [RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL] +RetrievalModes = int +"""One of [RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL]""" CHAIN_APPROX_NONE: int CHAIN_APPROX_SIMPLE: int CHAIN_APPROX_TC89_L1: int CHAIN_APPROX_TC89_KCOS: int -# One of 
[CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS] ContourApproximationModes = int +"""One of [CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS]""" CONTOURS_MATCH_I1: int CONTOURS_MATCH_I2: int CONTOURS_MATCH_I3: int -ShapeMatchModes = int # One of [CONTOURS_MATCH_I1, CONTOURS_MATCH_I2, CONTOURS_MATCH_I3] +ShapeMatchModes = int +"""One of [CONTOURS_MATCH_I1, CONTOURS_MATCH_I2, CONTOURS_MATCH_I3]""" HOUGH_STANDARD: int HOUGH_PROBABILISTIC: int HOUGH_MULTI_SCALE: int HOUGH_GRADIENT: int HOUGH_GRADIENT_ALT: int -HoughModes = int # One of [HOUGH_STANDARD, HOUGH_PROBABILISTIC, HOUGH_MULTI_SCALE, HOUGH_GRADIENT, HOUGH_GRADIENT_ALT] +HoughModes = int +"""One of [HOUGH_STANDARD, HOUGH_PROBABILISTIC, HOUGH_MULTI_SCALE, HOUGH_GRADIENT, HOUGH_GRADIENT_ALT]""" LSD_REFINE_NONE: int LSD_REFINE_STD: int LSD_REFINE_ADV: int -LineSegmentDetectorModes = int # One of [LSD_REFINE_NONE, LSD_REFINE_STD, LSD_REFINE_ADV] +LineSegmentDetectorModes = int +"""One of [LSD_REFINE_NONE, LSD_REFINE_STD, LSD_REFINE_ADV]""" HISTCMP_CORREL: int HISTCMP_CHISQR: int @@ -315,10 +354,9 @@ HISTCMP_BHATTACHARYYA: int HISTCMP_HELLINGER: int HISTCMP_CHISQR_ALT: int HISTCMP_KL_DIV: int -# One of [HISTCMP_CORREL, HISTCMP_CHISQR, HISTCMP_INTERSECT, -# HISTCMP_BHATTACHARYYA, HISTCMP_HELLINGER, HISTCMP_CHISQR_ALT, -# HISTCMP_KL_DIV] HistCompMethods = int +"""One of [HISTCMP_CORREL, HISTCMP_CHISQR, HISTCMP_INTERSECT, HISTCMP_BHATTACHARYYA, HISTCMP_HELLINGER, +HISTCMP_CHISQR_ALT, HISTCMP_KL_DIV]""" COLOR_BGR2BGRA: int COLOR_RGB2RGBA: int @@ -365,23 +403,35 @@ COLOR_RGB2XYZ: int COLOR_XYZ2BGR: int COLOR_XYZ2RGB: int COLOR_BGR2YCrCb: int +COLOR_BGR2YCR_CB: int COLOR_RGB2YCrCb: int +COLOR_RGB2YCR_CB: int COLOR_YCrCb2BGR: int +COLOR_YCR_CB2BGR: int COLOR_YCrCb2RGB: int +COLOR_YCR_CB2RGB: int COLOR_BGR2HSV: int COLOR_RGB2HSV: int COLOR_BGR2Lab: int +COLOR_BGR2LAB: int COLOR_RGB2Lab: int +COLOR_RGB2LAB: int COLOR_BGR2Luv: int +COLOR_BGR2LUV: int 
COLOR_RGB2Luv: int +COLOR_RGB2LUV: int COLOR_BGR2HLS: int COLOR_RGB2HLS: int COLOR_HSV2BGR: int COLOR_HSV2RGB: int COLOR_Lab2BGR: int +COLOR_LAB2BGR: int COLOR_Lab2RGB: int +COLOR_LAB2RGB: int COLOR_Luv2BGR: int +COLOR_LUV2BGR: int COLOR_Luv2RGB: int +COLOR_LUV2RGB: int COLOR_HLS2BGR: int COLOR_HLS2RGB: int COLOR_BGR2HSV_FULL: int @@ -393,13 +443,21 @@ COLOR_HSV2RGB_FULL: int COLOR_HLS2BGR_FULL: int COLOR_HLS2RGB_FULL: int COLOR_LBGR2Lab: int +COLOR_LBGR2LAB: int COLOR_LRGB2Lab: int +COLOR_LRGB2LAB: int COLOR_LBGR2Luv: int +COLOR_LBGR2LUV: int COLOR_LRGB2Luv: int +COLOR_LRGB2LUV: int COLOR_Lab2LBGR: int +COLOR_LAB2LBGR: int COLOR_Lab2LRGB: int +COLOR_LAB2LRGB: int COLOR_Luv2LBGR: int +COLOR_LUV2LBGR: int COLOR_Luv2LRGB: int +COLOR_LUV2LRGB: int COLOR_BGR2YUV: int COLOR_RGB2YUV: int COLOR_YUV2BGR: int @@ -409,13 +467,17 @@ COLOR_YUV2BGR_NV12: int COLOR_YUV2RGB_NV21: int COLOR_YUV2BGR_NV21: int COLOR_YUV420sp2RGB: int +COLOR_YUV420SP2RGB: int COLOR_YUV420sp2BGR: int +COLOR_YUV420SP2BGR: int COLOR_YUV2RGBA_NV12: int COLOR_YUV2BGRA_NV12: int COLOR_YUV2RGBA_NV21: int COLOR_YUV2BGRA_NV21: int COLOR_YUV420sp2RGBA: int +COLOR_YUV420SP2RGBA: int COLOR_YUV420sp2BGRA: int +COLOR_YUV420SP2BGRA: int COLOR_YUV2RGB_YV12: int COLOR_YUV2BGR_YV12: int COLOR_YUV2RGB_IYUV: int @@ -423,7 +485,9 @@ COLOR_YUV2BGR_IYUV: int COLOR_YUV2RGB_I420: int COLOR_YUV2BGR_I420: int COLOR_YUV420p2RGB: int +COLOR_YUV420P2RGB: int COLOR_YUV420p2BGR: int +COLOR_YUV420P2BGR: int COLOR_YUV2RGBA_YV12: int COLOR_YUV2BGRA_YV12: int COLOR_YUV2RGBA_IYUV: int @@ -431,7 +495,9 @@ COLOR_YUV2BGRA_IYUV: int COLOR_YUV2RGBA_I420: int COLOR_YUV2BGRA_I420: int COLOR_YUV420p2RGBA: int +COLOR_YUV420P2RGBA: int COLOR_YUV420p2BGRA: int +COLOR_YUV420P2BGRA: int COLOR_YUV2GRAY_420: int COLOR_YUV2GRAY_NV21: int COLOR_YUV2GRAY_NV12: int @@ -439,7 +505,9 @@ COLOR_YUV2GRAY_YV12: int COLOR_YUV2GRAY_IYUV: int COLOR_YUV2GRAY_I420: int COLOR_YUV420sp2GRAY: int +COLOR_YUV420SP2GRAY: int COLOR_YUV420p2GRAY: int +COLOR_YUV420P2GRAY: 
int COLOR_YUV2RGB_UYVY: int COLOR_YUV2BGR_UYVY: int COLOR_YUV2RGB_Y422: int @@ -476,7 +544,9 @@ COLOR_YUV2GRAY_YVYU: int COLOR_YUV2GRAY_YUYV: int COLOR_YUV2GRAY_YUNV: int COLOR_RGBA2mRGBA: int +COLOR_RGBA2M_RGBA: int COLOR_mRGBA2RGBA: int +COLOR_M_RGBA2RGBA: int COLOR_RGB2YUV_I420: int COLOR_BGR2YUV_I420: int COLOR_RGB2YUV_IYUV: int @@ -490,134 +560,227 @@ COLOR_BGR2YUV_YV12: int COLOR_RGBA2YUV_YV12: int COLOR_BGRA2YUV_YV12: int COLOR_BayerBG2BGR: int +COLOR_BAYER_BG2BGR: int COLOR_BayerGB2BGR: int +COLOR_BAYER_GB2BGR: int COLOR_BayerRG2BGR: int +COLOR_BAYER_RG2BGR: int COLOR_BayerGR2BGR: int +COLOR_BAYER_GR2BGR: int COLOR_BayerRGGB2BGR: int +COLOR_BAYER_RGGB2BGR: int COLOR_BayerGRBG2BGR: int +COLOR_BAYER_GRBG2BGR: int COLOR_BayerBGGR2BGR: int +COLOR_BAYER_BGGR2BGR: int COLOR_BayerGBRG2BGR: int +COLOR_BAYER_GBRG2BGR: int COLOR_BayerRGGB2RGB: int +COLOR_BAYER_RGGB2RGB: int COLOR_BayerGRBG2RGB: int +COLOR_BAYER_GRBG2RGB: int COLOR_BayerBGGR2RGB: int +COLOR_BAYER_BGGR2RGB: int COLOR_BayerGBRG2RGB: int +COLOR_BAYER_GBRG2RGB: int COLOR_BayerBG2RGB: int +COLOR_BAYER_BG2RGB: int COLOR_BayerGB2RGB: int +COLOR_BAYER_GB2RGB: int COLOR_BayerRG2RGB: int +COLOR_BAYER_RG2RGB: int COLOR_BayerGR2RGB: int +COLOR_BAYER_GR2RGB: int COLOR_BayerBG2GRAY: int +COLOR_BAYER_BG2GRAY: int COLOR_BayerGB2GRAY: int +COLOR_BAYER_GB2GRAY: int COLOR_BayerRG2GRAY: int +COLOR_BAYER_RG2GRAY: int COLOR_BayerGR2GRAY: int +COLOR_BAYER_GR2GRAY: int COLOR_BayerRGGB2GRAY: int +COLOR_BAYER_RGGB2GRAY: int COLOR_BayerGRBG2GRAY: int +COLOR_BAYER_GRBG2GRAY: int COLOR_BayerBGGR2GRAY: int +COLOR_BAYER_BGGR2GRAY: int COLOR_BayerGBRG2GRAY: int +COLOR_BAYER_GBRG2GRAY: int COLOR_BayerBG2BGR_VNG: int +COLOR_BAYER_BG2BGR_VNG: int COLOR_BayerGB2BGR_VNG: int +COLOR_BAYER_GB2BGR_VNG: int COLOR_BayerRG2BGR_VNG: int +COLOR_BAYER_RG2BGR_VNG: int COLOR_BayerGR2BGR_VNG: int +COLOR_BAYER_GR2BGR_VNG: int COLOR_BayerRGGB2BGR_VNG: int +COLOR_BAYER_RGGB2BGR_VNG: int COLOR_BayerGRBG2BGR_VNG: int +COLOR_BAYER_GRBG2BGR_VNG: int 
COLOR_BayerBGGR2BGR_VNG: int +COLOR_BAYER_BGGR2BGR_VNG: int COLOR_BayerGBRG2BGR_VNG: int +COLOR_BAYER_GBRG2BGR_VNG: int COLOR_BayerRGGB2RGB_VNG: int +COLOR_BAYER_RGGB2RGB_VNG: int COLOR_BayerGRBG2RGB_VNG: int +COLOR_BAYER_GRBG2RGB_VNG: int COLOR_BayerBGGR2RGB_VNG: int +COLOR_BAYER_BGGR2RGB_VNG: int COLOR_BayerGBRG2RGB_VNG: int +COLOR_BAYER_GBRG2RGB_VNG: int COLOR_BayerBG2RGB_VNG: int +COLOR_BAYER_BG2RGB_VNG: int COLOR_BayerGB2RGB_VNG: int +COLOR_BAYER_GB2RGB_VNG: int COLOR_BayerRG2RGB_VNG: int +COLOR_BAYER_RG2RGB_VNG: int COLOR_BayerGR2RGB_VNG: int +COLOR_BAYER_GR2RGB_VNG: int COLOR_BayerBG2BGR_EA: int +COLOR_BAYER_BG2BGR_EA: int COLOR_BayerGB2BGR_EA: int +COLOR_BAYER_GB2BGR_EA: int COLOR_BayerRG2BGR_EA: int +COLOR_BAYER_RG2BGR_EA: int COLOR_BayerGR2BGR_EA: int +COLOR_BAYER_GR2BGR_EA: int COLOR_BayerRGGB2BGR_EA: int +COLOR_BAYER_RGGB2BGR_EA: int COLOR_BayerGRBG2BGR_EA: int +COLOR_BAYER_GRBG2BGR_EA: int COLOR_BayerBGGR2BGR_EA: int +COLOR_BAYER_BGGR2BGR_EA: int COLOR_BayerGBRG2BGR_EA: int +COLOR_BAYER_GBRG2BGR_EA: int COLOR_BayerRGGB2RGB_EA: int +COLOR_BAYER_RGGB2RGB_EA: int COLOR_BayerGRBG2RGB_EA: int +COLOR_BAYER_GRBG2RGB_EA: int COLOR_BayerBGGR2RGB_EA: int +COLOR_BAYER_BGGR2RGB_EA: int COLOR_BayerGBRG2RGB_EA: int +COLOR_BAYER_GBRG2RGB_EA: int COLOR_BayerBG2RGB_EA: int +COLOR_BAYER_BG2RGB_EA: int COLOR_BayerGB2RGB_EA: int +COLOR_BAYER_GB2RGB_EA: int COLOR_BayerRG2RGB_EA: int +COLOR_BAYER_RG2RGB_EA: int COLOR_BayerGR2RGB_EA: int +COLOR_BAYER_GR2RGB_EA: int COLOR_BayerBG2BGRA: int +COLOR_BAYER_BG2BGRA: int COLOR_BayerGB2BGRA: int +COLOR_BAYER_GB2BGRA: int COLOR_BayerRG2BGRA: int +COLOR_BAYER_RG2BGRA: int COLOR_BayerGR2BGRA: int +COLOR_BAYER_GR2BGRA: int COLOR_BayerRGGB2BGRA: int +COLOR_BAYER_RGGB2BGRA: int COLOR_BayerGRBG2BGRA: int +COLOR_BAYER_GRBG2BGRA: int COLOR_BayerBGGR2BGRA: int +COLOR_BAYER_BGGR2BGRA: int COLOR_BayerGBRG2BGRA: int +COLOR_BAYER_GBRG2BGRA: int COLOR_BayerRGGB2RGBA: int +COLOR_BAYER_RGGB2RGBA: int COLOR_BayerGRBG2RGBA: int +COLOR_BAYER_GRBG2RGBA: 
int COLOR_BayerBGGR2RGBA: int +COLOR_BAYER_BGGR2RGBA: int COLOR_BayerGBRG2RGBA: int +COLOR_BAYER_GBRG2RGBA: int COLOR_BayerBG2RGBA: int +COLOR_BAYER_BG2RGBA: int COLOR_BayerGB2RGBA: int +COLOR_BAYER_GB2RGBA: int COLOR_BayerRG2RGBA: int +COLOR_BAYER_RG2RGBA: int COLOR_BayerGR2RGBA: int +COLOR_BAYER_GR2RGBA: int COLOR_COLORCVT_MAX: int -ColorConversionCodes = int # One of [COLOR_BGR2BGRA, COLOR_RGB2RGBA, COLOR_BGRA2BGR, COLOR_RGBA2RGB, COLOR_BGR2RGBA, -# COLOR_RGB2BGRA, COLOR_RGBA2BGR, COLOR_BGRA2RGB, COLOR_BGR2RGB, COLOR_RGB2BGR, COLOR_BGRA2RGBA, COLOR_RGBA2BGRA, -# COLOR_BGR2GRAY, COLOR_RGB2GRAY, COLOR_GRAY2BGR, COLOR_GRAY2RGB, COLOR_GRAY2BGRA, COLOR_GRAY2RGBA, COLOR_BGRA2GRAY, -# COLOR_RGBA2GRAY, COLOR_BGR2BGR565, COLOR_RGB2BGR565, COLOR_BGR5652BGR, COLOR_BGR5652RGB, COLOR_BGRA2BGR565, -# COLOR_RGBA2BGR565, COLOR_BGR5652BGRA, COLOR_BGR5652RGBA, COLOR_GRAY2BGR565, COLOR_BGR5652GRAY, COLOR_BGR2BGR555, -# COLOR_RGB2BGR555, COLOR_BGR5552BGR, COLOR_BGR5552RGB, COLOR_BGRA2BGR555, COLOR_RGBA2BGR555, COLOR_BGR5552BGRA, -# COLOR_BGR5552RGBA, COLOR_GRAY2BGR555, COLOR_BGR5552GRAY, COLOR_BGR2XYZ, COLOR_RGB2XYZ, COLOR_XYZ2BGR, COLOR_XYZ2RGB, -# COLOR_BGR2YCrCb, COLOR_RGB2YCrCb, COLOR_YCrCb2BGR, COLOR_YCrCb2RGB, COLOR_BGR2HSV, COLOR_RGB2HSV, COLOR_BGR2Lab, -# COLOR_RGB2Lab, COLOR_BGR2Luv, COLOR_RGB2Luv, COLOR_BGR2HLS, COLOR_RGB2HLS, COLOR_HSV2BGR, COLOR_HSV2RGB, -# COLOR_Lab2BGR, COLOR_Lab2RGB, COLOR_Luv2BGR, COLOR_Luv2RGB, COLOR_HLS2BGR, COLOR_HLS2RGB, COLOR_BGR2HSV_FULL, -# COLOR_RGB2HSV_FULL, COLOR_BGR2HLS_FULL, COLOR_RGB2HLS_FULL, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB_FULL, -# COLOR_HLS2BGR_FULL, COLOR_HLS2RGB_FULL, COLOR_LBGR2Lab, COLOR_LRGB2Lab, COLOR_LBGR2Luv, COLOR_LRGB2Luv, -# COLOR_Lab2LBGR, COLOR_Lab2LRGB, COLOR_Luv2LBGR, COLOR_Luv2LRGB, COLOR_BGR2YUV, COLOR_RGB2YUV, COLOR_YUV2BGR, -# COLOR_YUV2RGB, COLOR_YUV2RGB_NV12, COLOR_YUV2BGR_NV12, COLOR_YUV2RGB_NV21, COLOR_YUV2BGR_NV21, COLOR_YUV420sp2RGB, -# COLOR_YUV420sp2BGR, COLOR_YUV2RGBA_NV12, COLOR_YUV2BGRA_NV12, 
COLOR_YUV2RGBA_NV21, COLOR_YUV2BGRA_NV21, -# COLOR_YUV420sp2RGBA, COLOR_YUV420sp2BGRA, COLOR_YUV2RGB_YV12, COLOR_YUV2BGR_YV12, COLOR_YUV2RGB_IYUV, -# COLOR_YUV2BGR_IYUV, COLOR_YUV2RGB_I420, COLOR_YUV2BGR_I420, COLOR_YUV420p2RGB, COLOR_YUV420p2BGR, COLOR_YUV2RGBA_YV12, -# COLOR_YUV2BGRA_YV12, COLOR_YUV2RGBA_IYUV, COLOR_YUV2BGRA_IYUV, COLOR_YUV2RGBA_I420, COLOR_YUV2BGRA_I420, -# COLOR_YUV420p2RGBA, COLOR_YUV420p2BGRA, COLOR_YUV2GRAY_420, COLOR_YUV2GRAY_NV21, COLOR_YUV2GRAY_NV12, -# COLOR_YUV2GRAY_YV12, COLOR_YUV2GRAY_IYUV, COLOR_YUV2GRAY_I420, COLOR_YUV420sp2GRAY, COLOR_YUV420p2GRAY, -# COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGB_Y422, COLOR_YUV2BGR_Y422, COLOR_YUV2RGB_UYNV, -# COLOR_YUV2BGR_UYNV, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGBA_Y422, COLOR_YUV2BGRA_Y422, -# COLOR_YUV2RGBA_UYNV, COLOR_YUV2BGRA_UYNV, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, -# COLOR_YUV2BGR_YVYU, COLOR_YUV2RGB_YUYV, COLOR_YUV2BGR_YUYV, COLOR_YUV2RGB_YUNV, COLOR_YUV2BGR_YUNV, -# COLOR_YUV2RGBA_YUY2, COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU, COLOR_YUV2RGBA_YUYV, -# COLOR_YUV2BGRA_YUYV, COLOR_YUV2RGBA_YUNV, COLOR_YUV2BGRA_YUNV, COLOR_YUV2GRAY_UYVY, COLOR_YUV2GRAY_YUY2, -# COLOR_YUV2GRAY_Y422, COLOR_YUV2GRAY_UYNV, COLOR_YUV2GRAY_YVYU, COLOR_YUV2GRAY_YUYV, COLOR_YUV2GRAY_YUNV, -# COLOR_RGBA2mRGBA, COLOR_mRGBA2RGBA, COLOR_RGB2YUV_I420, COLOR_BGR2YUV_I420, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, -# COLOR_RGBA2YUV_I420, COLOR_BGRA2YUV_I420, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, COLOR_RGB2YUV_YV12, -# COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12, COLOR_BayerBG2BGR, COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, -# COLOR_BayerGR2BGR, COLOR_BayerRGGB2BGR, COLOR_BayerGRBG2BGR, COLOR_BayerBGGR2BGR, COLOR_BayerGBRG2BGR, -# COLOR_BayerRGGB2RGB, COLOR_BayerGRBG2RGB, COLOR_BayerBGGR2RGB, COLOR_BayerGBRG2RGB, COLOR_BayerBG2RGB, -# COLOR_BayerGB2RGB, COLOR_BayerRG2RGB, COLOR_BayerGR2RGB, COLOR_BayerBG2GRAY, COLOR_BayerGB2GRAY, 
COLOR_BayerRG2GRAY, -# COLOR_BayerGR2GRAY, COLOR_BayerRGGB2GRAY, COLOR_BayerGRBG2GRAY, COLOR_BayerBGGR2GRAY, COLOR_BayerGBRG2GRAY, -# COLOR_BayerBG2BGR_VNG, COLOR_BayerGB2BGR_VNG, COLOR_BayerRG2BGR_VNG, COLOR_BayerGR2BGR_VNG, COLOR_BayerRGGB2BGR_VNG, -# COLOR_BayerGRBG2BGR_VNG, COLOR_BayerBGGR2BGR_VNG, COLOR_BayerGBRG2BGR_VNG, COLOR_BayerRGGB2RGB_VNG, -# COLOR_BayerGRBG2RGB_VNG, COLOR_BayerBGGR2RGB_VNG, COLOR_BayerGBRG2RGB_VNG, COLOR_BayerBG2RGB_VNG, -# COLOR_BayerGB2RGB_VNG, COLOR_BayerRG2RGB_VNG, COLOR_BayerGR2RGB_VNG, COLOR_BayerBG2BGR_EA, COLOR_BayerGB2BGR_EA, -# COLOR_BayerRG2BGR_EA, COLOR_BayerGR2BGR_EA, COLOR_BayerRGGB2BGR_EA, COLOR_BayerGRBG2BGR_EA, COLOR_BayerBGGR2BGR_EA, -# COLOR_BayerGBRG2BGR_EA, COLOR_BayerRGGB2RGB_EA, COLOR_BayerGRBG2RGB_EA, COLOR_BayerBGGR2RGB_EA, -# COLOR_BayerGBRG2RGB_EA, COLOR_BayerBG2RGB_EA, COLOR_BayerGB2RGB_EA, COLOR_BayerRG2RGB_EA, COLOR_BayerGR2RGB_EA, -# COLOR_BayerBG2BGRA, COLOR_BayerGB2BGRA, COLOR_BayerRG2BGRA, COLOR_BayerGR2BGRA, COLOR_BayerRGGB2BGRA, -# COLOR_BayerGRBG2BGRA, COLOR_BayerBGGR2BGRA, COLOR_BayerGBRG2BGRA, COLOR_BayerRGGB2RGBA, COLOR_BayerGRBG2RGBA, -# COLOR_BayerBGGR2RGBA, COLOR_BayerGBRG2RGBA, COLOR_BayerBG2RGBA, COLOR_BayerGB2RGBA, COLOR_BayerRG2RGBA, -# COLOR_BayerGR2RGBA, COLOR_COLORCVT_MAX] +ColorConversionCodes = int +"""One of [COLOR_BGR2BGRA, COLOR_RGB2RGBA, COLOR_BGRA2BGR, COLOR_RGBA2RGB, COLOR_BGR2RGBA, COLOR_RGB2BGRA, +COLOR_RGBA2BGR, COLOR_BGRA2RGB, COLOR_BGR2RGB, COLOR_RGB2BGR, COLOR_BGRA2RGBA, COLOR_RGBA2BGRA, COLOR_BGR2GRAY, +COLOR_RGB2GRAY, COLOR_GRAY2BGR, COLOR_GRAY2RGB, COLOR_GRAY2BGRA, COLOR_GRAY2RGBA, COLOR_BGRA2GRAY, COLOR_RGBA2GRAY, +COLOR_BGR2BGR565, COLOR_RGB2BGR565, COLOR_BGR5652BGR, COLOR_BGR5652RGB, COLOR_BGRA2BGR565, COLOR_RGBA2BGR565, +COLOR_BGR5652BGRA, COLOR_BGR5652RGBA, COLOR_GRAY2BGR565, COLOR_BGR5652GRAY, COLOR_BGR2BGR555, COLOR_RGB2BGR555, +COLOR_BGR5552BGR, COLOR_BGR5552RGB, COLOR_BGRA2BGR555, COLOR_RGBA2BGR555, COLOR_BGR5552BGRA, COLOR_BGR5552RGBA, +COLOR_GRAY2BGR555, 
COLOR_BGR5552GRAY, COLOR_BGR2XYZ, COLOR_RGB2XYZ, COLOR_XYZ2BGR, COLOR_XYZ2RGB, COLOR_BGR2YCrCb, +COLOR_BGR2YCR_CB, COLOR_RGB2YCrCb, COLOR_RGB2YCR_CB, COLOR_YCrCb2BGR, COLOR_YCR_CB2BGR, COLOR_YCrCb2RGB, +COLOR_YCR_CB2RGB, COLOR_BGR2HSV, COLOR_RGB2HSV, COLOR_BGR2Lab, COLOR_BGR2LAB, COLOR_RGB2Lab, COLOR_RGB2LAB, +COLOR_BGR2Luv, COLOR_BGR2LUV, COLOR_RGB2Luv, COLOR_RGB2LUV, COLOR_BGR2HLS, COLOR_RGB2HLS, COLOR_HSV2BGR, COLOR_HSV2RGB, +COLOR_Lab2BGR, COLOR_LAB2BGR, COLOR_Lab2RGB, COLOR_LAB2RGB, COLOR_Luv2BGR, COLOR_LUV2BGR, COLOR_Luv2RGB, COLOR_LUV2RGB, +COLOR_HLS2BGR, COLOR_HLS2RGB, COLOR_BGR2HSV_FULL, COLOR_RGB2HSV_FULL, COLOR_BGR2HLS_FULL, COLOR_RGB2HLS_FULL, +COLOR_HSV2BGR_FULL, COLOR_HSV2RGB_FULL, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB_FULL, COLOR_LBGR2Lab, COLOR_LBGR2LAB, +COLOR_LRGB2Lab, COLOR_LRGB2LAB, COLOR_LBGR2Luv, COLOR_LBGR2LUV, COLOR_LRGB2Luv, COLOR_LRGB2LUV, COLOR_Lab2LBGR, +COLOR_LAB2LBGR, COLOR_Lab2LRGB, COLOR_LAB2LRGB, COLOR_Luv2LBGR, COLOR_LUV2LBGR, COLOR_Luv2LRGB, COLOR_LUV2LRGB, +COLOR_BGR2YUV, COLOR_RGB2YUV, COLOR_YUV2BGR, COLOR_YUV2RGB, COLOR_YUV2RGB_NV12, COLOR_YUV2BGR_NV12, COLOR_YUV2RGB_NV21, +COLOR_YUV2BGR_NV21, COLOR_YUV420sp2RGB, COLOR_YUV420SP2RGB, COLOR_YUV420sp2BGR, COLOR_YUV420SP2BGR, COLOR_YUV2RGBA_NV12, +COLOR_YUV2BGRA_NV12, COLOR_YUV2RGBA_NV21, COLOR_YUV2BGRA_NV21, COLOR_YUV420sp2RGBA, COLOR_YUV420SP2RGBA, +COLOR_YUV420sp2BGRA, COLOR_YUV420SP2BGRA, COLOR_YUV2RGB_YV12, COLOR_YUV2BGR_YV12, COLOR_YUV2RGB_IYUV, +COLOR_YUV2BGR_IYUV, COLOR_YUV2RGB_I420, COLOR_YUV2BGR_I420, COLOR_YUV420p2RGB, COLOR_YUV420P2RGB, COLOR_YUV420p2BGR, +COLOR_YUV420P2BGR, COLOR_YUV2RGBA_YV12, COLOR_YUV2BGRA_YV12, COLOR_YUV2RGBA_IYUV, COLOR_YUV2BGRA_IYUV, +COLOR_YUV2RGBA_I420, COLOR_YUV2BGRA_I420, COLOR_YUV420p2RGBA, COLOR_YUV420P2RGBA, COLOR_YUV420p2BGRA, +COLOR_YUV420P2BGRA, COLOR_YUV2GRAY_420, COLOR_YUV2GRAY_NV21, COLOR_YUV2GRAY_NV12, COLOR_YUV2GRAY_YV12, +COLOR_YUV2GRAY_IYUV, COLOR_YUV2GRAY_I420, COLOR_YUV420sp2GRAY, COLOR_YUV420SP2GRAY, COLOR_YUV420p2GRAY, 
+COLOR_YUV420P2GRAY, COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGB_Y422, COLOR_YUV2BGR_Y422, COLOR_YUV2RGB_UYNV, +COLOR_YUV2BGR_UYNV, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGBA_Y422, COLOR_YUV2BGRA_Y422, +COLOR_YUV2RGBA_UYNV, COLOR_YUV2BGRA_UYNV, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, +COLOR_YUV2BGR_YVYU, COLOR_YUV2RGB_YUYV, COLOR_YUV2BGR_YUYV, COLOR_YUV2RGB_YUNV, COLOR_YUV2BGR_YUNV, COLOR_YUV2RGBA_YUY2, +COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU, COLOR_YUV2RGBA_YUYV, COLOR_YUV2BGRA_YUYV, +COLOR_YUV2RGBA_YUNV, COLOR_YUV2BGRA_YUNV, COLOR_YUV2GRAY_UYVY, COLOR_YUV2GRAY_YUY2, COLOR_YUV2GRAY_Y422, +COLOR_YUV2GRAY_UYNV, COLOR_YUV2GRAY_YVYU, COLOR_YUV2GRAY_YUYV, COLOR_YUV2GRAY_YUNV, COLOR_RGBA2mRGBA, COLOR_RGBA2M_RGBA, +COLOR_mRGBA2RGBA, COLOR_M_RGBA2RGBA, COLOR_RGB2YUV_I420, COLOR_BGR2YUV_I420, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, +COLOR_RGBA2YUV_I420, COLOR_BGRA2YUV_I420, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, COLOR_RGB2YUV_YV12, +COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12, COLOR_BayerBG2BGR, COLOR_BAYER_BG2BGR, COLOR_BayerGB2BGR, +COLOR_BAYER_GB2BGR, COLOR_BayerRG2BGR, COLOR_BAYER_RG2BGR, COLOR_BayerGR2BGR, COLOR_BAYER_GR2BGR, COLOR_BayerRGGB2BGR, +COLOR_BAYER_RGGB2BGR, COLOR_BayerGRBG2BGR, COLOR_BAYER_GRBG2BGR, COLOR_BayerBGGR2BGR, COLOR_BAYER_BGGR2BGR, +COLOR_BayerGBRG2BGR, COLOR_BAYER_GBRG2BGR, COLOR_BayerRGGB2RGB, COLOR_BAYER_RGGB2RGB, COLOR_BayerGRBG2RGB, +COLOR_BAYER_GRBG2RGB, COLOR_BayerBGGR2RGB, COLOR_BAYER_BGGR2RGB, COLOR_BayerGBRG2RGB, COLOR_BAYER_GBRG2RGB, +COLOR_BayerBG2RGB, COLOR_BAYER_BG2RGB, COLOR_BayerGB2RGB, COLOR_BAYER_GB2RGB, COLOR_BayerRG2RGB, COLOR_BAYER_RG2RGB, +COLOR_BayerGR2RGB, COLOR_BAYER_GR2RGB, COLOR_BayerBG2GRAY, COLOR_BAYER_BG2GRAY, COLOR_BayerGB2GRAY, COLOR_BAYER_GB2GRAY, +COLOR_BayerRG2GRAY, COLOR_BAYER_RG2GRAY, COLOR_BayerGR2GRAY, COLOR_BAYER_GR2GRAY, COLOR_BayerRGGB2GRAY, +COLOR_BAYER_RGGB2GRAY, COLOR_BayerGRBG2GRAY, COLOR_BAYER_GRBG2GRAY, 
COLOR_BayerBGGR2GRAY, COLOR_BAYER_BGGR2GRAY, +COLOR_BayerGBRG2GRAY, COLOR_BAYER_GBRG2GRAY, COLOR_BayerBG2BGR_VNG, COLOR_BAYER_BG2BGR_VNG, COLOR_BayerGB2BGR_VNG, +COLOR_BAYER_GB2BGR_VNG, COLOR_BayerRG2BGR_VNG, COLOR_BAYER_RG2BGR_VNG, COLOR_BayerGR2BGR_VNG, COLOR_BAYER_GR2BGR_VNG, +COLOR_BayerRGGB2BGR_VNG, COLOR_BAYER_RGGB2BGR_VNG, COLOR_BayerGRBG2BGR_VNG, COLOR_BAYER_GRBG2BGR_VNG, +COLOR_BayerBGGR2BGR_VNG, COLOR_BAYER_BGGR2BGR_VNG, COLOR_BayerGBRG2BGR_VNG, COLOR_BAYER_GBRG2BGR_VNG, +COLOR_BayerRGGB2RGB_VNG, COLOR_BAYER_RGGB2RGB_VNG, COLOR_BayerGRBG2RGB_VNG, COLOR_BAYER_GRBG2RGB_VNG, +COLOR_BayerBGGR2RGB_VNG, COLOR_BAYER_BGGR2RGB_VNG, COLOR_BayerGBRG2RGB_VNG, COLOR_BAYER_GBRG2RGB_VNG, +COLOR_BayerBG2RGB_VNG, COLOR_BAYER_BG2RGB_VNG, COLOR_BayerGB2RGB_VNG, COLOR_BAYER_GB2RGB_VNG, COLOR_BayerRG2RGB_VNG, +COLOR_BAYER_RG2RGB_VNG, COLOR_BayerGR2RGB_VNG, COLOR_BAYER_GR2RGB_VNG, COLOR_BayerBG2BGR_EA, COLOR_BAYER_BG2BGR_EA, +COLOR_BayerGB2BGR_EA, COLOR_BAYER_GB2BGR_EA, COLOR_BayerRG2BGR_EA, COLOR_BAYER_RG2BGR_EA, COLOR_BayerGR2BGR_EA, +COLOR_BAYER_GR2BGR_EA, COLOR_BayerRGGB2BGR_EA, COLOR_BAYER_RGGB2BGR_EA, COLOR_BayerGRBG2BGR_EA, COLOR_BAYER_GRBG2BGR_EA, +COLOR_BayerBGGR2BGR_EA, COLOR_BAYER_BGGR2BGR_EA, COLOR_BayerGBRG2BGR_EA, COLOR_BAYER_GBRG2BGR_EA, +COLOR_BayerRGGB2RGB_EA, COLOR_BAYER_RGGB2RGB_EA, COLOR_BayerGRBG2RGB_EA, COLOR_BAYER_GRBG2RGB_EA, +COLOR_BayerBGGR2RGB_EA, COLOR_BAYER_BGGR2RGB_EA, COLOR_BayerGBRG2RGB_EA, COLOR_BAYER_GBRG2RGB_EA, COLOR_BayerBG2RGB_EA, +COLOR_BAYER_BG2RGB_EA, COLOR_BayerGB2RGB_EA, COLOR_BAYER_GB2RGB_EA, COLOR_BayerRG2RGB_EA, COLOR_BAYER_RG2RGB_EA, +COLOR_BayerGR2RGB_EA, COLOR_BAYER_GR2RGB_EA, COLOR_BayerBG2BGRA, COLOR_BAYER_BG2BGRA, COLOR_BayerGB2BGRA, +COLOR_BAYER_GB2BGRA, COLOR_BayerRG2BGRA, COLOR_BAYER_RG2BGRA, COLOR_BayerGR2BGRA, COLOR_BAYER_GR2BGRA, +COLOR_BayerRGGB2BGRA, COLOR_BAYER_RGGB2BGRA, COLOR_BayerGRBG2BGRA, COLOR_BAYER_GRBG2BGRA, COLOR_BayerBGGR2BGRA, +COLOR_BAYER_BGGR2BGRA, COLOR_BayerGBRG2BGRA, COLOR_BAYER_GBRG2BGRA, 
COLOR_BayerRGGB2RGBA, COLOR_BAYER_RGGB2RGBA, +COLOR_BayerGRBG2RGBA, COLOR_BAYER_GRBG2RGBA, COLOR_BayerBGGR2RGBA, COLOR_BAYER_BGGR2RGBA, COLOR_BayerGBRG2RGBA, +COLOR_BAYER_GBRG2RGBA, COLOR_BayerBG2RGBA, COLOR_BAYER_BG2RGBA, COLOR_BayerGB2RGBA, COLOR_BAYER_GB2RGBA, +COLOR_BayerRG2RGBA, COLOR_BAYER_RG2RGBA, COLOR_BayerGR2RGBA, COLOR_BAYER_GR2RGBA, COLOR_COLORCVT_MAX]""" INTERSECT_NONE: int INTERSECT_PARTIAL: int INTERSECT_FULL: int -RectanglesIntersectTypes = int # One of [INTERSECT_NONE, INTERSECT_PARTIAL, INTERSECT_FULL] +RectanglesIntersectTypes = int +"""One of [INTERSECT_NONE, INTERSECT_PARTIAL, INTERSECT_FULL]""" FILLED: int LINE_4: int LINE_8: int LINE_AA: int -LineTypes = int # One of [FILLED, LINE_4, LINE_8, LINE_AA] +LineTypes = int +"""One of [FILLED, LINE_4, LINE_8, LINE_AA]""" FONT_HERSHEY_SIMPLEX: int FONT_HERSHEY_PLAIN: int @@ -628,10 +791,9 @@ FONT_HERSHEY_COMPLEX_SMALL: int FONT_HERSHEY_SCRIPT_SIMPLEX: int FONT_HERSHEY_SCRIPT_COMPLEX: int FONT_ITALIC: int -# One of [FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, -# FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, -# FONT_HERSHEY_SCRIPT_SIMPLEX, FONT_HERSHEY_SCRIPT_COMPLEX, FONT_ITALIC] HersheyFonts = int +"""One of [FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, +FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, FONT_HERSHEY_SCRIPT_COMPLEX, FONT_ITALIC]""" MARKER_CROSS: int MARKER_TILTED_CROSS: int @@ -640,9 +802,9 @@ MARKER_DIAMOND: int MARKER_SQUARE: int MARKER_TRIANGLE_UP: int MARKER_TRIANGLE_DOWN: int -# One of [MARKER_CROSS, MARKER_TILTED_CROSS, MARKER_STAR, MARKER_DIAMOND, -# MARKER_SQUARE, MARKER_TRIANGLE_UP, MARKER_TRIANGLE_DOWN] MarkerTypes = int +"""One of [MARKER_CROSS, MARKER_TILTED_CROSS, MARKER_STAR, MARKER_DIAMOND, MARKER_SQUARE, MARKER_TRIANGLE_UP, +MARKER_TRIANGLE_DOWN]""" TM_SQDIFF: int TM_SQDIFF_NORMED: int @@ -650,7 +812,8 @@ TM_CCORR: int TM_CCORR_NORMED: int TM_CCOEFF: int 
TM_CCOEFF_NORMED: int -TemplateMatchModes = int # One of [TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED] +TemplateMatchModes = int +"""One of [TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED]""" COLORMAP_AUTUMN: int COLORMAP_BONE: int @@ -674,10 +837,11 @@ COLORMAP_TWILIGHT: int COLORMAP_TWILIGHT_SHIFTED: int COLORMAP_TURBO: int COLORMAP_DEEPGREEN: int -ColormapTypes = int # One of [COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, -# COLORMAP_OCEAN, COLORMAP_SUMMER, COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, -# COLORMAP_PARULA, COLORMAP_MAGMA, COLORMAP_INFERNO, COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, -# COLORMAP_TWILIGHT, COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN] +ColormapTypes = int +"""One of [COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, COLORMAP_OCEAN, +COLORMAP_SUMMER, COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, COLORMAP_PARULA, +COLORMAP_MAGMA, COLORMAP_INFERNO, COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, COLORMAP_TWILIGHT, +COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN]""" INPAINT_NS: int INPAINT_TELEA: int @@ -1006,10 +1170,17 @@ MOTION_AFFINE: int MOTION_HOMOGRAPHY: int DrawMatchesFlags_DEFAULT: int +DRAW_MATCHES_FLAGS_DEFAULT: int DrawMatchesFlags_DRAW_OVER_OUTIMG: int +DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG: int DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS: int +DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS: int DrawMatchesFlags_DRAW_RICH_KEYPOINTS: int -DrawMatchesFlags = int # One of [DEFAULT, DRAW_OVER_OUTIMG, NOT_DRAW_SINGLE_POINTS, DRAW_RICH_KEYPOINTS] +DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS: int +DrawMatchesFlags = int +"""One of [DrawMatchesFlags_DEFAULT, DRAW_MATCHES_FLAGS_DEFAULT, DrawMatchesFlags_DRAW_OVER_OUTIMG, +DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG, DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS, 
DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS, +DrawMatchesFlags_DRAW_RICH_KEYPOINTS, DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS]""" IMREAD_UNCHANGED: int IMREAD_GRAYSCALE: int @@ -1024,9 +1195,10 @@ IMREAD_REDUCED_COLOR_4: int IMREAD_REDUCED_GRAYSCALE_8: int IMREAD_REDUCED_COLOR_8: int IMREAD_IGNORE_ORIENTATION: int -ImreadModes = int # One of [IMREAD_UNCHANGED, IMREAD_GRAYSCALE, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR, -# IMREAD_LOAD_GDAL, IMREAD_REDUCED_GRAYSCALE_2, IMREAD_REDUCED_COLOR_2, IMREAD_REDUCED_GRAYSCALE_4, -# IMREAD_REDUCED_COLOR_4, IMREAD_REDUCED_GRAYSCALE_8, IMREAD_REDUCED_COLOR_8, IMREAD_IGNORE_ORIENTATION] +ImreadModes = int +"""One of [IMREAD_UNCHANGED, IMREAD_GRAYSCALE, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR, IMREAD_LOAD_GDAL, +IMREAD_REDUCED_GRAYSCALE_2, IMREAD_REDUCED_COLOR_2, IMREAD_REDUCED_GRAYSCALE_4, IMREAD_REDUCED_COLOR_4, +IMREAD_REDUCED_GRAYSCALE_8, IMREAD_REDUCED_COLOR_8, IMREAD_IGNORE_ORIENTATION]""" IMWRITE_JPEG_QUALITY: int IMWRITE_JPEG_PROGRESSIVE: int @@ -1050,26 +1222,30 @@ IMWRITE_TIFF_XDPI: int IMWRITE_TIFF_YDPI: int IMWRITE_TIFF_COMPRESSION: int IMWRITE_JPEG2000_COMPRESSION_X1000: int -ImwriteFlags = int # One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, -# IMWRITE_JPEG_RST_INTERVAL, IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, -# IMWRITE_PNG_COMPRESSION, IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, -# IMWRITE_EXR_COMPRESSION, IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, -# IMWRITE_PAM_TUPLETYPE, IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, -# IMWRITE_JPEG2000_COMPRESSION_X1000] +IMWRITE_AVIF_QUALITY: int +IMWRITE_AVIF_DEPTH: int +IMWRITE_AVIF_SPEED: int +ImwriteFlags = int +"""One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, IMWRITE_JPEG_RST_INTERVAL, +IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, 
IMWRITE_JPEG_SAMPLING_FACTOR, IMWRITE_PNG_COMPRESSION, +IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, IMWRITE_EXR_COMPRESSION, +IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, IMWRITE_PAM_TUPLETYPE, +IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, +IMWRITE_JPEG2000_COMPRESSION_X1000, IMWRITE_AVIF_QUALITY, IMWRITE_AVIF_DEPTH, IMWRITE_AVIF_SPEED]""" IMWRITE_JPEG_SAMPLING_FACTOR_411: int IMWRITE_JPEG_SAMPLING_FACTOR_420: int IMWRITE_JPEG_SAMPLING_FACTOR_422: int IMWRITE_JPEG_SAMPLING_FACTOR_440: int IMWRITE_JPEG_SAMPLING_FACTOR_444: int -# One of [IMWRITE_JPEG_SAMPLING_FACTOR_411, -# IMWRITE_JPEG_SAMPLING_FACTOR_420, IMWRITE_JPEG_SAMPLING_FACTOR_422, -# IMWRITE_JPEG_SAMPLING_FACTOR_440, IMWRITE_JPEG_SAMPLING_FACTOR_444] ImwriteJPEGSamplingFactorParams = int +"""One of [IMWRITE_JPEG_SAMPLING_FACTOR_411, IMWRITE_JPEG_SAMPLING_FACTOR_420, IMWRITE_JPEG_SAMPLING_FACTOR_422, +IMWRITE_JPEG_SAMPLING_FACTOR_440, IMWRITE_JPEG_SAMPLING_FACTOR_444]""" IMWRITE_EXR_TYPE_HALF: int IMWRITE_EXR_TYPE_FLOAT: int -ImwriteEXRTypeFlags = int # One of [IMWRITE_EXR_TYPE_HALF, IMWRITE_EXR_TYPE_FLOAT] +ImwriteEXRTypeFlags = int +"""One of [IMWRITE_EXR_TYPE_HALF, IMWRITE_EXR_TYPE_FLOAT]""" IMWRITE_EXR_COMPRESSION_NO: int IMWRITE_EXR_COMPRESSION_RLE: int @@ -1081,22 +1257,19 @@ IMWRITE_EXR_COMPRESSION_B44: int IMWRITE_EXR_COMPRESSION_B44A: int IMWRITE_EXR_COMPRESSION_DWAA: int IMWRITE_EXR_COMPRESSION_DWAB: int -# One of [IMWRITE_EXR_COMPRESSION_NO, IMWRITE_EXR_COMPRESSION_RLE, -# IMWRITE_EXR_COMPRESSION_ZIPS, IMWRITE_EXR_COMPRESSION_ZIP, -# IMWRITE_EXR_COMPRESSION_PIZ, IMWRITE_EXR_COMPRESSION_PXR24, -# IMWRITE_EXR_COMPRESSION_B44, IMWRITE_EXR_COMPRESSION_B44A, -# IMWRITE_EXR_COMPRESSION_DWAA, IMWRITE_EXR_COMPRESSION_DWAB] ImwriteEXRCompressionFlags = int +"""One of [IMWRITE_EXR_COMPRESSION_NO, IMWRITE_EXR_COMPRESSION_RLE, IMWRITE_EXR_COMPRESSION_ZIPS, +IMWRITE_EXR_COMPRESSION_ZIP, 
IMWRITE_EXR_COMPRESSION_PIZ, IMWRITE_EXR_COMPRESSION_PXR24, IMWRITE_EXR_COMPRESSION_B44, +IMWRITE_EXR_COMPRESSION_B44A, IMWRITE_EXR_COMPRESSION_DWAA, IMWRITE_EXR_COMPRESSION_DWAB]""" IMWRITE_PNG_STRATEGY_DEFAULT: int IMWRITE_PNG_STRATEGY_FILTERED: int IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY: int IMWRITE_PNG_STRATEGY_RLE: int IMWRITE_PNG_STRATEGY_FIXED: int -# One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, -# IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, IMWRITE_PNG_STRATEGY_RLE, -# IMWRITE_PNG_STRATEGY_FIXED] ImwritePNGFlags = int +"""One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, +IMWRITE_PNG_STRATEGY_RLE, IMWRITE_PNG_STRATEGY_FIXED]""" IMWRITE_PAM_FORMAT_NULL: int IMWRITE_PAM_FORMAT_BLACKANDWHITE: int @@ -1104,14 +1277,14 @@ IMWRITE_PAM_FORMAT_GRAYSCALE: int IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA: int IMWRITE_PAM_FORMAT_RGB: int IMWRITE_PAM_FORMAT_RGB_ALPHA: int -# One of [IMWRITE_PAM_FORMAT_NULL, IMWRITE_PAM_FORMAT_BLACKANDWHITE, -# IMWRITE_PAM_FORMAT_GRAYSCALE, IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA, -# IMWRITE_PAM_FORMAT_RGB, IMWRITE_PAM_FORMAT_RGB_ALPHA] ImwritePAMFlags = int +"""One of [IMWRITE_PAM_FORMAT_NULL, IMWRITE_PAM_FORMAT_BLACKANDWHITE, IMWRITE_PAM_FORMAT_GRAYSCALE, +IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA, IMWRITE_PAM_FORMAT_RGB, IMWRITE_PAM_FORMAT_RGB_ALPHA]""" IMWRITE_HDR_COMPRESSION_NONE: int IMWRITE_HDR_COMPRESSION_RLE: int -ImwriteHDRCompressionFlags = int # One of [IMWRITE_HDR_COMPRESSION_NONE, IMWRITE_HDR_COMPRESSION_RLE] +ImwriteHDRCompressionFlags = int +"""One of [IMWRITE_HDR_COMPRESSION_NONE, IMWRITE_HDR_COMPRESSION_RLE]""" CAP_ANY: int CAP_VFW: int @@ -1149,11 +1322,12 @@ CAP_INTEL_MFX: int CAP_XINE: int CAP_UEYE: int CAP_OBSENSOR: int -VideoCaptureAPIs = int # One of [CAP_ANY, CAP_VFW, CAP_V4L, CAP_V4L2, CAP_FIREWIRE, CAP_FIREWARE, CAP_IEEE1394, -# CAP_DC1394, CAP_CMU1394, CAP_QT, CAP_UNICAP, CAP_DSHOW, CAP_PVAPI, CAP_OPENNI, CAP_OPENNI_ASUS, CAP_ANDROID, -# CAP_XIAPI, CAP_AVFOUNDATION, 
CAP_GIGANETIX, CAP_MSMF, CAP_WINRT, CAP_INTELPERC, CAP_REALSENSE, CAP_OPENNI2, -# CAP_OPENNI2_ASUS, CAP_OPENNI2_ASTRA, CAP_GPHOTO2, CAP_GSTREAMER, CAP_FFMPEG, CAP_IMAGES, CAP_ARAVIS, CAP_OPENCV_MJPEG, -# CAP_INTEL_MFX, CAP_XINE, CAP_UEYE, CAP_OBSENSOR] +VideoCaptureAPIs = int +"""One of [CAP_ANY, CAP_VFW, CAP_V4L, CAP_V4L2, CAP_FIREWIRE, CAP_FIREWARE, CAP_IEEE1394, CAP_DC1394, CAP_CMU1394, +CAP_QT, CAP_UNICAP, CAP_DSHOW, CAP_PVAPI, CAP_OPENNI, CAP_OPENNI_ASUS, CAP_ANDROID, CAP_XIAPI, CAP_AVFOUNDATION, +CAP_GIGANETIX, CAP_MSMF, CAP_WINRT, CAP_INTELPERC, CAP_REALSENSE, CAP_OPENNI2, CAP_OPENNI2_ASUS, CAP_OPENNI2_ASTRA, +CAP_GPHOTO2, CAP_GSTREAMER, CAP_FFMPEG, CAP_IMAGES, CAP_ARAVIS, CAP_OPENCV_MJPEG, CAP_INTEL_MFX, CAP_XINE, CAP_UEYE, +CAP_OBSENSOR]""" CAP_PROP_POS_MSEC: int CAP_PROP_POS_FRAMES: int @@ -1225,21 +1399,22 @@ CAP_PROP_LRF_HAS_KEY_FRAME: int CAP_PROP_CODEC_EXTRADATA_INDEX: int CAP_PROP_FRAME_TYPE: int CAP_PROP_N_THREADS: int -VideoCaptureProperties = int # One of [CAP_PROP_POS_MSEC, CAP_PROP_POS_FRAMES, CAP_PROP_POS_AVI_RATIO, -# CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS, CAP_PROP_FOURCC, CAP_PROP_FRAME_COUNT, CAP_PROP_FORMAT, -# CAP_PROP_MODE, CAP_PROP_BRIGHTNESS, CAP_PROP_CONTRAST, CAP_PROP_SATURATION, CAP_PROP_HUE, CAP_PROP_GAIN, -# CAP_PROP_EXPOSURE, CAP_PROP_CONVERT_RGB, CAP_PROP_WHITE_BALANCE_BLUE_U, CAP_PROP_RECTIFICATION, CAP_PROP_MONOCHROME, -# CAP_PROP_SHARPNESS, CAP_PROP_AUTO_EXPOSURE, CAP_PROP_GAMMA, CAP_PROP_TEMPERATURE, CAP_PROP_TRIGGER, -# CAP_PROP_TRIGGER_DELAY, CAP_PROP_WHITE_BALANCE_RED_V, CAP_PROP_ZOOM, CAP_PROP_FOCUS, CAP_PROP_GUID, -# CAP_PROP_ISO_SPEED, CAP_PROP_BACKLIGHT, CAP_PROP_PAN, CAP_PROP_TILT, CAP_PROP_ROLL, CAP_PROP_IRIS, CAP_PROP_SETTINGS, -# CAP_PROP_BUFFERSIZE, CAP_PROP_AUTOFOCUS, CAP_PROP_SAR_NUM, CAP_PROP_SAR_DEN, CAP_PROP_BACKEND, CAP_PROP_CHANNEL, -# CAP_PROP_AUTO_WB, CAP_PROP_WB_TEMPERATURE, CAP_PROP_CODEC_PIXEL_FORMAT, CAP_PROP_BITRATE, CAP_PROP_ORIENTATION_META, -# CAP_PROP_ORIENTATION_AUTO, 
CAP_PROP_HW_ACCELERATION, CAP_PROP_HW_DEVICE, CAP_PROP_HW_ACCELERATION_USE_OPENCL, -# CAP_PROP_OPEN_TIMEOUT_MSEC, CAP_PROP_READ_TIMEOUT_MSEC, CAP_PROP_STREAM_OPEN_TIME_USEC, CAP_PROP_VIDEO_TOTAL_CHANNELS, -# CAP_PROP_VIDEO_STREAM, CAP_PROP_AUDIO_STREAM, CAP_PROP_AUDIO_POS, CAP_PROP_AUDIO_SHIFT_NSEC, -# CAP_PROP_AUDIO_DATA_DEPTH, CAP_PROP_AUDIO_SAMPLES_PER_SECOND, CAP_PROP_AUDIO_BASE_INDEX, -# CAP_PROP_AUDIO_TOTAL_CHANNELS, CAP_PROP_AUDIO_TOTAL_STREAMS, CAP_PROP_AUDIO_SYNCHRONIZE, CAP_PROP_LRF_HAS_KEY_FRAME, -# CAP_PROP_CODEC_EXTRADATA_INDEX, CAP_PROP_FRAME_TYPE, CAP_PROP_N_THREADS] +VideoCaptureProperties = int +"""One of [CAP_PROP_POS_MSEC, CAP_PROP_POS_FRAMES, CAP_PROP_POS_AVI_RATIO, CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, +CAP_PROP_FPS, CAP_PROP_FOURCC, CAP_PROP_FRAME_COUNT, CAP_PROP_FORMAT, CAP_PROP_MODE, CAP_PROP_BRIGHTNESS, +CAP_PROP_CONTRAST, CAP_PROP_SATURATION, CAP_PROP_HUE, CAP_PROP_GAIN, CAP_PROP_EXPOSURE, CAP_PROP_CONVERT_RGB, +CAP_PROP_WHITE_BALANCE_BLUE_U, CAP_PROP_RECTIFICATION, CAP_PROP_MONOCHROME, CAP_PROP_SHARPNESS, CAP_PROP_AUTO_EXPOSURE, +CAP_PROP_GAMMA, CAP_PROP_TEMPERATURE, CAP_PROP_TRIGGER, CAP_PROP_TRIGGER_DELAY, CAP_PROP_WHITE_BALANCE_RED_V, +CAP_PROP_ZOOM, CAP_PROP_FOCUS, CAP_PROP_GUID, CAP_PROP_ISO_SPEED, CAP_PROP_BACKLIGHT, CAP_PROP_PAN, CAP_PROP_TILT, +CAP_PROP_ROLL, CAP_PROP_IRIS, CAP_PROP_SETTINGS, CAP_PROP_BUFFERSIZE, CAP_PROP_AUTOFOCUS, CAP_PROP_SAR_NUM, +CAP_PROP_SAR_DEN, CAP_PROP_BACKEND, CAP_PROP_CHANNEL, CAP_PROP_AUTO_WB, CAP_PROP_WB_TEMPERATURE, +CAP_PROP_CODEC_PIXEL_FORMAT, CAP_PROP_BITRATE, CAP_PROP_ORIENTATION_META, CAP_PROP_ORIENTATION_AUTO, +CAP_PROP_HW_ACCELERATION, CAP_PROP_HW_DEVICE, CAP_PROP_HW_ACCELERATION_USE_OPENCL, CAP_PROP_OPEN_TIMEOUT_MSEC, +CAP_PROP_READ_TIMEOUT_MSEC, CAP_PROP_STREAM_OPEN_TIME_USEC, CAP_PROP_VIDEO_TOTAL_CHANNELS, CAP_PROP_VIDEO_STREAM, +CAP_PROP_AUDIO_STREAM, CAP_PROP_AUDIO_POS, CAP_PROP_AUDIO_SHIFT_NSEC, CAP_PROP_AUDIO_DATA_DEPTH, +CAP_PROP_AUDIO_SAMPLES_PER_SECOND, 
CAP_PROP_AUDIO_BASE_INDEX, CAP_PROP_AUDIO_TOTAL_CHANNELS, +CAP_PROP_AUDIO_TOTAL_STREAMS, CAP_PROP_AUDIO_SYNCHRONIZE, CAP_PROP_LRF_HAS_KEY_FRAME, CAP_PROP_CODEC_EXTRADATA_INDEX, +CAP_PROP_FRAME_TYPE, CAP_PROP_N_THREADS]""" VIDEOWRITER_PROP_QUALITY: int VIDEOWRITER_PROP_FRAMEBYTES: int @@ -1249,42 +1424,41 @@ VIDEOWRITER_PROP_DEPTH: int VIDEOWRITER_PROP_HW_ACCELERATION: int VIDEOWRITER_PROP_HW_DEVICE: int VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL: int -# One of [VIDEOWRITER_PROP_QUALITY, VIDEOWRITER_PROP_FRAMEBYTES, -# VIDEOWRITER_PROP_NSTRIPES, VIDEOWRITER_PROP_IS_COLOR, -# VIDEOWRITER_PROP_DEPTH, VIDEOWRITER_PROP_HW_ACCELERATION, -# VIDEOWRITER_PROP_HW_DEVICE, VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL] VideoWriterProperties = int +"""One of [VIDEOWRITER_PROP_QUALITY, VIDEOWRITER_PROP_FRAMEBYTES, VIDEOWRITER_PROP_NSTRIPES, VIDEOWRITER_PROP_IS_COLOR, +VIDEOWRITER_PROP_DEPTH, VIDEOWRITER_PROP_HW_ACCELERATION, VIDEOWRITER_PROP_HW_DEVICE, +VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL]""" VIDEO_ACCELERATION_NONE: int VIDEO_ACCELERATION_ANY: int VIDEO_ACCELERATION_D3D11: int VIDEO_ACCELERATION_VAAPI: int VIDEO_ACCELERATION_MFX: int -# One of [VIDEO_ACCELERATION_NONE, VIDEO_ACCELERATION_ANY, -# VIDEO_ACCELERATION_D3D11, VIDEO_ACCELERATION_VAAPI, -# VIDEO_ACCELERATION_MFX] VideoAccelerationType = int +"""One of [VIDEO_ACCELERATION_NONE, VIDEO_ACCELERATION_ANY, VIDEO_ACCELERATION_D3D11, VIDEO_ACCELERATION_VAAPI, +VIDEO_ACCELERATION_MFX]""" CAP_OBSENSOR_DEPTH_MAP: int CAP_OBSENSOR_BGR_IMAGE: int CAP_OBSENSOR_IR_IMAGE: int -VideoCaptureOBSensorDataType = int # One of [CAP_OBSENSOR_DEPTH_MAP, CAP_OBSENSOR_BGR_IMAGE, CAP_OBSENSOR_IR_IMAGE] +VideoCaptureOBSensorDataType = int +"""One of [CAP_OBSENSOR_DEPTH_MAP, CAP_OBSENSOR_BGR_IMAGE, CAP_OBSENSOR_IR_IMAGE]""" CAP_OBSENSOR_DEPTH_GENERATOR: int CAP_OBSENSOR_IMAGE_GENERATOR: int CAP_OBSENSOR_IR_GENERATOR: int CAP_OBSENSOR_GENERATORS_MASK: int -# One of [CAP_OBSENSOR_DEPTH_GENERATOR, CAP_OBSENSOR_IMAGE_GENERATOR, -# 
CAP_OBSENSOR_IR_GENERATOR, CAP_OBSENSOR_GENERATORS_MASK] VideoCaptureOBSensorGenerators = int +"""One of [CAP_OBSENSOR_DEPTH_GENERATOR, CAP_OBSENSOR_IMAGE_GENERATOR, CAP_OBSENSOR_IR_GENERATOR, +CAP_OBSENSOR_GENERATORS_MASK]""" CAP_PROP_OBSENSOR_INTRINSIC_FX: int CAP_PROP_OBSENSOR_INTRINSIC_FY: int CAP_PROP_OBSENSOR_INTRINSIC_CX: int CAP_PROP_OBSENSOR_INTRINSIC_CY: int -# One of [CAP_PROP_OBSENSOR_INTRINSIC_FX, CAP_PROP_OBSENSOR_INTRINSIC_FY, -# CAP_PROP_OBSENSOR_INTRINSIC_CX, CAP_PROP_OBSENSOR_INTRINSIC_CY] VideoCaptureOBSensorProperties = int +"""One of [CAP_PROP_OBSENSOR_INTRINSIC_FX, CAP_PROP_OBSENSOR_INTRINSIC_FY, CAP_PROP_OBSENSOR_INTRINSIC_CX, +CAP_PROP_OBSENSOR_INTRINSIC_CY]""" SOLVEPNP_ITERATIVE: int SOLVEPNP_EPNP: int @@ -1296,52 +1470,63 @@ SOLVEPNP_IPPE: int SOLVEPNP_IPPE_SQUARE: int SOLVEPNP_SQPNP: int SOLVEPNP_MAX_COUNT: int -# One of [SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, -# SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE, -# SOLVEPNP_SQPNP, SOLVEPNP_MAX_COUNT] SolvePnPMethod = int +"""One of [SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, +SOLVEPNP_IPPE_SQUARE, SOLVEPNP_SQPNP, SOLVEPNP_MAX_COUNT]""" CALIB_HAND_EYE_TSAI: int CALIB_HAND_EYE_PARK: int CALIB_HAND_EYE_HORAUD: int CALIB_HAND_EYE_ANDREFF: int CALIB_HAND_EYE_DANIILIDIS: int -# One of [CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD, -# CALIB_HAND_EYE_ANDREFF, CALIB_HAND_EYE_DANIILIDIS] HandEyeCalibrationMethod = int +"""One of [CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD, CALIB_HAND_EYE_ANDREFF, +CALIB_HAND_EYE_DANIILIDIS]""" CALIB_ROBOT_WORLD_HAND_EYE_SHAH: int CALIB_ROBOT_WORLD_HAND_EYE_LI: int -RobotWorldHandEyeCalibrationMethod = int # One of [CALIB_ROBOT_WORLD_HAND_EYE_SHAH, CALIB_ROBOT_WORLD_HAND_EYE_LI] +RobotWorldHandEyeCalibrationMethod = int +"""One of [CALIB_ROBOT_WORLD_HAND_EYE_SHAH, CALIB_ROBOT_WORLD_HAND_EYE_LI]""" SAMPLING_UNIFORM: int 
SAMPLING_PROGRESSIVE_NAPSAC: int SAMPLING_NAPSAC: int SAMPLING_PROSAC: int -SamplingMethod = int # One of [SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, SAMPLING_PROSAC] +SamplingMethod = int +"""One of [SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, SAMPLING_PROSAC]""" LOCAL_OPTIM_NULL: int LOCAL_OPTIM_INNER_LO: int LOCAL_OPTIM_INNER_AND_ITER_LO: int LOCAL_OPTIM_GC: int LOCAL_OPTIM_SIGMA: int -# One of [LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA] LocalOptimMethod = int +"""One of [LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA]""" SCORE_METHOD_RANSAC: int SCORE_METHOD_MSAC: int SCORE_METHOD_MAGSAC: int SCORE_METHOD_LMEDS: int -ScoreMethod = int # One of [SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS] +ScoreMethod = int +"""One of [SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS]""" NEIGH_FLANN_KNN: int NEIGH_GRID: int NEIGH_FLANN_RADIUS: int -NeighborSearchMethod = int # One of [NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS] +NeighborSearchMethod = int +"""One of [NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS]""" + +NONE_POLISHER: int +LSQ_POLISHER: int +MAGSAC: int +COV_POLISHER: int +PolishingMethod = int +"""One of [NONE_POLISHER, LSQ_POLISHER, MAGSAC, COV_POLISHER]""" PROJ_SPHERICAL_ORTHO: int PROJ_SPHERICAL_EQRECT: int -UndistortTypes = int # One of [PROJ_SPHERICAL_ORTHO, PROJ_SPHERICAL_EQRECT] +UndistortTypes = int +"""One of [PROJ_SPHERICAL_ORTHO, PROJ_SPHERICAL_EQRECT]""" WINDOW_NORMAL: int WINDOW_AUTOSIZE: int @@ -1351,10 +1536,9 @@ WINDOW_FREERATIO: int WINDOW_KEEPRATIO: int WINDOW_GUI_EXPANDED: int WINDOW_GUI_NORMAL: int -# One of [WINDOW_NORMAL, WINDOW_AUTOSIZE, WINDOW_OPENGL, -# WINDOW_FULLSCREEN, WINDOW_FREERATIO, WINDOW_KEEPRATIO, -# WINDOW_GUI_EXPANDED, WINDOW_GUI_NORMAL] WindowFlags = int +"""One of [WINDOW_NORMAL, 
WINDOW_AUTOSIZE, WINDOW_OPENGL, WINDOW_FULLSCREEN, WINDOW_FREERATIO, WINDOW_KEEPRATIO, +WINDOW_GUI_EXPANDED, WINDOW_GUI_NORMAL]""" WND_PROP_FULLSCREEN: int WND_PROP_AUTOSIZE: int @@ -1363,9 +1547,9 @@ WND_PROP_OPENGL: int WND_PROP_VISIBLE: int WND_PROP_TOPMOST: int WND_PROP_VSYNC: int -# One of [WND_PROP_FULLSCREEN, WND_PROP_AUTOSIZE, WND_PROP_ASPECT_RATIO, -# WND_PROP_OPENGL, WND_PROP_VISIBLE, WND_PROP_TOPMOST, WND_PROP_VSYNC] WindowPropertyFlags = int +"""One of [WND_PROP_FULLSCREEN, WND_PROP_AUTOSIZE, WND_PROP_ASPECT_RATIO, WND_PROP_OPENGL, WND_PROP_VISIBLE, +WND_PROP_TOPMOST, WND_PROP_VSYNC]""" EVENT_MOUSEMOVE: int EVENT_LBUTTONDOWN: int @@ -1379,11 +1563,9 @@ EVENT_RBUTTONDBLCLK: int EVENT_MBUTTONDBLCLK: int EVENT_MOUSEWHEEL: int EVENT_MOUSEHWHEEL: int -# One of [EVENT_MOUSEMOVE, EVENT_LBUTTONDOWN, EVENT_RBUTTONDOWN, -# EVENT_MBUTTONDOWN, EVENT_LBUTTONUP, EVENT_RBUTTONUP, EVENT_MBUTTONUP, -# EVENT_LBUTTONDBLCLK, EVENT_RBUTTONDBLCLK, EVENT_MBUTTONDBLCLK, -# EVENT_MOUSEWHEEL, EVENT_MOUSEHWHEEL] MouseEventTypes = int +"""One of [EVENT_MOUSEMOVE, EVENT_LBUTTONDOWN, EVENT_RBUTTONDOWN, EVENT_MBUTTONDOWN, EVENT_LBUTTONUP, EVENT_RBUTTONUP, + EVENT_MBUTTONUP, EVENT_LBUTTONDBLCLK, EVENT_RBUTTONDBLCLK, EVENT_MBUTTONDBLCLK, EVENT_MOUSEWHEEL, EVENT_MOUSEHWHEEL]""" EVENT_FLAG_LBUTTON: int EVENT_FLAG_RBUTTON: int @@ -1391,342 +1573,612 @@ EVENT_FLAG_MBUTTON: int EVENT_FLAG_CTRLKEY: int EVENT_FLAG_SHIFTKEY: int EVENT_FLAG_ALTKEY: int -# One of [EVENT_FLAG_LBUTTON, EVENT_FLAG_RBUTTON, EVENT_FLAG_MBUTTON, -# EVENT_FLAG_CTRLKEY, EVENT_FLAG_SHIFTKEY, EVENT_FLAG_ALTKEY] MouseEventFlags = int +"""One of [EVENT_FLAG_LBUTTON, EVENT_FLAG_RBUTTON, EVENT_FLAG_MBUTTON, EVENT_FLAG_CTRLKEY, EVENT_FLAG_SHIFTKEY, +EVENT_FLAG_ALTKEY]""" QT_FONT_LIGHT: int QT_FONT_NORMAL: int QT_FONT_DEMIBOLD: int QT_FONT_BOLD: int QT_FONT_BLACK: int -QtFontWeights = int # One of [QT_FONT_LIGHT, QT_FONT_NORMAL, QT_FONT_DEMIBOLD, QT_FONT_BOLD, QT_FONT_BLACK] +QtFontWeights = int +"""One of [QT_FONT_LIGHT, 
QT_FONT_NORMAL, QT_FONT_DEMIBOLD, QT_FONT_BOLD, QT_FONT_BLACK]""" QT_STYLE_NORMAL: int QT_STYLE_ITALIC: int QT_STYLE_OBLIQUE: int -QtFontStyles = int # One of [QT_STYLE_NORMAL, QT_STYLE_ITALIC, QT_STYLE_OBLIQUE] +QtFontStyles = int +"""One of [QT_STYLE_NORMAL, QT_STYLE_ITALIC, QT_STYLE_OBLIQUE]""" QT_PUSH_BUTTON: int QT_CHECKBOX: int QT_RADIOBOX: int QT_NEW_BUTTONBAR: int -QtButtonTypes = int # One of [QT_PUSH_BUTTON, QT_CHECKBOX, QT_RADIOBOX, QT_NEW_BUTTONBAR] +QtButtonTypes = int +"""One of [QT_PUSH_BUTTON, QT_CHECKBOX, QT_RADIOBOX, QT_NEW_BUTTONBAR]""" GShape_GMAT: int +GSHAPE_GMAT: int GShape_GSCALAR: int +GSHAPE_GSCALAR: int GShape_GARRAY: int +GSHAPE_GARRAY: int GShape_GOPAQUE: int +GSHAPE_GOPAQUE: int GShape_GFRAME: int -GShape = int # One of [GMAT, GSCALAR, GARRAY, GOPAQUE, GFRAME] +GSHAPE_GFRAME: int +GShape = int +"""One of [GShape_GMAT, GSHAPE_GMAT, GShape_GSCALAR, GSHAPE_GSCALAR, GShape_GARRAY, GSHAPE_GARRAY, GShape_GOPAQUE, +GSHAPE_GOPAQUE, GShape_GFRAME, GSHAPE_GFRAME]""" MediaFormat_BGR: int +MEDIA_FORMAT_BGR: int MediaFormat_NV12: int +MEDIA_FORMAT_NV12: int MediaFormat_GRAY: int -MediaFormat = int # One of [BGR, NV12, GRAY] +MEDIA_FORMAT_GRAY: int +MediaFormat = int +"""One of [MediaFormat_BGR, MEDIA_FORMAT_BGR, MediaFormat_NV12, MEDIA_FORMAT_NV12, MediaFormat_GRAY, +MEDIA_FORMAT_GRAY]""" FileStorage_READ: int +FILE_STORAGE_READ: int FileStorage_WRITE: int +FILE_STORAGE_WRITE: int FileStorage_APPEND: int +FILE_STORAGE_APPEND: int FileStorage_MEMORY: int +FILE_STORAGE_MEMORY: int FileStorage_FORMAT_MASK: int +FILE_STORAGE_FORMAT_MASK: int FileStorage_FORMAT_AUTO: int +FILE_STORAGE_FORMAT_AUTO: int FileStorage_FORMAT_XML: int +FILE_STORAGE_FORMAT_XML: int FileStorage_FORMAT_YAML: int +FILE_STORAGE_FORMAT_YAML: int FileStorage_FORMAT_JSON: int +FILE_STORAGE_FORMAT_JSON: int FileStorage_BASE64: int +FILE_STORAGE_BASE64: int FileStorage_WRITE_BASE64: int -# One of [READ, WRITE, APPEND, MEMORY, FORMAT_MASK, FORMAT_AUTO, -# FORMAT_XML, FORMAT_YAML, 
FORMAT_JSON, BASE64, WRITE_BASE64] +FILE_STORAGE_WRITE_BASE64: int FileStorage_Mode = int +"""One of [FileStorage_READ, FILE_STORAGE_READ, FileStorage_WRITE, FILE_STORAGE_WRITE, FileStorage_APPEND, +FILE_STORAGE_APPEND, FileStorage_MEMORY, FILE_STORAGE_MEMORY, FileStorage_FORMAT_MASK, FILE_STORAGE_FORMAT_MASK, +FileStorage_FORMAT_AUTO, FILE_STORAGE_FORMAT_AUTO, FileStorage_FORMAT_XML, FILE_STORAGE_FORMAT_XML, +FileStorage_FORMAT_YAML, FILE_STORAGE_FORMAT_YAML, FileStorage_FORMAT_JSON, FILE_STORAGE_FORMAT_JSON, +FileStorage_BASE64, FILE_STORAGE_BASE64, FileStorage_WRITE_BASE64, FILE_STORAGE_WRITE_BASE64]""" FileStorage_UNDEFINED: int +FILE_STORAGE_UNDEFINED: int FileStorage_VALUE_EXPECTED: int +FILE_STORAGE_VALUE_EXPECTED: int FileStorage_NAME_EXPECTED: int +FILE_STORAGE_NAME_EXPECTED: int FileStorage_INSIDE_MAP: int -FileStorage_State = int # One of [UNDEFINED, VALUE_EXPECTED, NAME_EXPECTED, INSIDE_MAP] +FILE_STORAGE_INSIDE_MAP: int +FileStorage_State = int +"""One of [FileStorage_UNDEFINED, FILE_STORAGE_UNDEFINED, FileStorage_VALUE_EXPECTED, FILE_STORAGE_VALUE_EXPECTED, +FileStorage_NAME_EXPECTED, FILE_STORAGE_NAME_EXPECTED, FileStorage_INSIDE_MAP, FILE_STORAGE_INSIDE_MAP]""" FileNode_NONE: int +FILE_NODE_NONE: int FileNode_INT: int +FILE_NODE_INT: int FileNode_REAL: int +FILE_NODE_REAL: int FileNode_FLOAT: int +FILE_NODE_FLOAT: int FileNode_STR: int +FILE_NODE_STR: int FileNode_STRING: int +FILE_NODE_STRING: int FileNode_SEQ: int +FILE_NODE_SEQ: int FileNode_MAP: int +FILE_NODE_MAP: int FileNode_TYPE_MASK: int +FILE_NODE_TYPE_MASK: int FileNode_FLOW: int +FILE_NODE_FLOW: int FileNode_UNIFORM: int +FILE_NODE_UNIFORM: int FileNode_EMPTY: int +FILE_NODE_EMPTY: int FileNode_NAMED: int +FILE_NODE_NAMED: int UMat_MAGIC_VAL: int +UMAT_MAGIC_VAL: int UMat_AUTO_STEP: int +UMAT_AUTO_STEP: int UMat_CONTINUOUS_FLAG: int +UMAT_CONTINUOUS_FLAG: int UMat_SUBMATRIX_FLAG: int +UMAT_SUBMATRIX_FLAG: int UMat_MAGIC_MASK: int +UMAT_MAGIC_MASK: int UMat_TYPE_MASK: int +UMAT_TYPE_MASK: 
int UMat_DEPTH_MASK: int +UMAT_DEPTH_MASK: int Subdiv2D_PTLOC_ERROR: int +SUBDIV2D_PTLOC_ERROR: int Subdiv2D_PTLOC_OUTSIDE_RECT: int +SUBDIV2D_PTLOC_OUTSIDE_RECT: int Subdiv2D_PTLOC_INSIDE: int +SUBDIV2D_PTLOC_INSIDE: int Subdiv2D_PTLOC_VERTEX: int +SUBDIV2D_PTLOC_VERTEX: int Subdiv2D_PTLOC_ON_EDGE: int +SUBDIV2D_PTLOC_ON_EDGE: int Subdiv2D_NEXT_AROUND_ORG: int +SUBDIV2D_NEXT_AROUND_ORG: int Subdiv2D_NEXT_AROUND_DST: int +SUBDIV2D_NEXT_AROUND_DST: int Subdiv2D_PREV_AROUND_ORG: int +SUBDIV2D_PREV_AROUND_ORG: int Subdiv2D_PREV_AROUND_DST: int +SUBDIV2D_PREV_AROUND_DST: int Subdiv2D_NEXT_AROUND_LEFT: int +SUBDIV2D_NEXT_AROUND_LEFT: int Subdiv2D_NEXT_AROUND_RIGHT: int +SUBDIV2D_NEXT_AROUND_RIGHT: int Subdiv2D_PREV_AROUND_LEFT: int +SUBDIV2D_PREV_AROUND_LEFT: int Subdiv2D_PREV_AROUND_RIGHT: int +SUBDIV2D_PREV_AROUND_RIGHT: int ORB_HARRIS_SCORE: int ORB_FAST_SCORE: int -ORB_ScoreType = int # One of [HARRIS_SCORE, FAST_SCORE] +ORB_ScoreType = int +"""One of [ORB_HARRIS_SCORE, ORB_FAST_SCORE]""" FastFeatureDetector_TYPE_5_8: int +FAST_FEATURE_DETECTOR_TYPE_5_8: int FastFeatureDetector_TYPE_7_12: int +FAST_FEATURE_DETECTOR_TYPE_7_12: int FastFeatureDetector_TYPE_9_16: int -FastFeatureDetector_DetectorType = int # One of [TYPE_5_8, TYPE_7_12, TYPE_9_16] +FAST_FEATURE_DETECTOR_TYPE_9_16: int +FastFeatureDetector_DetectorType = int +"""One of [FastFeatureDetector_TYPE_5_8, FAST_FEATURE_DETECTOR_TYPE_5_8, FastFeatureDetector_TYPE_7_12, +FAST_FEATURE_DETECTOR_TYPE_7_12, FastFeatureDetector_TYPE_9_16, FAST_FEATURE_DETECTOR_TYPE_9_16]""" FastFeatureDetector_THRESHOLD: int +FAST_FEATURE_DETECTOR_THRESHOLD: int FastFeatureDetector_NONMAX_SUPPRESSION: int +FAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int FastFeatureDetector_FAST_N: int +FAST_FEATURE_DETECTOR_FAST_N: int AgastFeatureDetector_AGAST_5_8: int +AGAST_FEATURE_DETECTOR_AGAST_5_8: int AgastFeatureDetector_AGAST_7_12d: int +AGAST_FEATURE_DETECTOR_AGAST_7_12D: int AgastFeatureDetector_AGAST_7_12s: int 
+AGAST_FEATURE_DETECTOR_AGAST_7_12S: int AgastFeatureDetector_OAST_9_16: int -AgastFeatureDetector_DetectorType = int # One of [AGAST_5_8, AGAST_7_12d, AGAST_7_12s, OAST_9_16] +AGAST_FEATURE_DETECTOR_OAST_9_16: int +AgastFeatureDetector_DetectorType = int +"""One of [AgastFeatureDetector_AGAST_5_8, AGAST_FEATURE_DETECTOR_AGAST_5_8, AgastFeatureDetector_AGAST_7_12d, +AGAST_FEATURE_DETECTOR_AGAST_7_12D, AgastFeatureDetector_AGAST_7_12s, AGAST_FEATURE_DETECTOR_AGAST_7_12S, +AgastFeatureDetector_OAST_9_16, AGAST_FEATURE_DETECTOR_OAST_9_16]""" AgastFeatureDetector_THRESHOLD: int +AGAST_FEATURE_DETECTOR_THRESHOLD: int AgastFeatureDetector_NONMAX_SUPPRESSION: int +AGAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int KAZE_DIFF_PM_G1: int KAZE_DIFF_PM_G2: int KAZE_DIFF_WEICKERT: int KAZE_DIFF_CHARBONNIER: int -KAZE_DiffusivityType = int # One of [DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT, DIFF_CHARBONNIER] +KAZE_DiffusivityType = int +"""One of [KAZE_DIFF_PM_G1, KAZE_DIFF_PM_G2, KAZE_DIFF_WEICKERT, KAZE_DIFF_CHARBONNIER]""" AKAZE_DESCRIPTOR_KAZE_UPRIGHT: int AKAZE_DESCRIPTOR_KAZE: int AKAZE_DESCRIPTOR_MLDB_UPRIGHT: int AKAZE_DESCRIPTOR_MLDB: int -# One of [DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_KAZE, DESCRIPTOR_MLDB_UPRIGHT, DESCRIPTOR_MLDB] AKAZE_DescriptorType = int +"""One of [AKAZE_DESCRIPTOR_KAZE_UPRIGHT, AKAZE_DESCRIPTOR_KAZE, AKAZE_DESCRIPTOR_MLDB_UPRIGHT, +AKAZE_DESCRIPTOR_MLDB]""" DescriptorMatcher_FLANNBASED: int +DESCRIPTOR_MATCHER_FLANNBASED: int DescriptorMatcher_BRUTEFORCE: int +DESCRIPTOR_MATCHER_BRUTEFORCE: int DescriptorMatcher_BRUTEFORCE_L1: int +DESCRIPTOR_MATCHER_BRUTEFORCE_L1: int DescriptorMatcher_BRUTEFORCE_HAMMING: int +DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING: int DescriptorMatcher_BRUTEFORCE_HAMMINGLUT: int +DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT: int DescriptorMatcher_BRUTEFORCE_SL2: int -# One of [FLANNBASED, BRUTEFORCE, BRUTEFORCE_L1, BRUTEFORCE_HAMMING, BRUTEFORCE_HAMMINGLUT, BRUTEFORCE_SL2] +DESCRIPTOR_MATCHER_BRUTEFORCE_SL2: int 
DescriptorMatcher_MatcherType = int +"""One of [DescriptorMatcher_FLANNBASED, DESCRIPTOR_MATCHER_FLANNBASED, DescriptorMatcher_BRUTEFORCE, +DESCRIPTOR_MATCHER_BRUTEFORCE, DescriptorMatcher_BRUTEFORCE_L1, DESCRIPTOR_MATCHER_BRUTEFORCE_L1, +DescriptorMatcher_BRUTEFORCE_HAMMING, DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING, DescriptorMatcher_BRUTEFORCE_HAMMINGLUT, +DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT, DescriptorMatcher_BRUTEFORCE_SL2, DESCRIPTOR_MATCHER_BRUTEFORCE_SL2]""" CirclesGridFinderParameters_SYMMETRIC_GRID: int +CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID: int CirclesGridFinderParameters_ASYMMETRIC_GRID: int -CirclesGridFinderParameters_GridType = int # One of [SYMMETRIC_GRID, ASYMMETRIC_GRID] +CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID: int +CirclesGridFinderParameters_GridType = int +"""One of [CirclesGridFinderParameters_SYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID, +CirclesGridFinderParameters_ASYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID]""" StereoMatcher_DISP_SHIFT: int +STEREO_MATCHER_DISP_SHIFT: int StereoMatcher_DISP_SCALE: int +STEREO_MATCHER_DISP_SCALE: int StereoBM_PREFILTER_NORMALIZED_RESPONSE: int +STEREO_BM_PREFILTER_NORMALIZED_RESPONSE: int StereoBM_PREFILTER_XSOBEL: int +STEREO_BM_PREFILTER_XSOBEL: int StereoSGBM_MODE_SGBM: int +STEREO_SGBM_MODE_SGBM: int StereoSGBM_MODE_HH: int +STEREO_SGBM_MODE_HH: int StereoSGBM_MODE_SGBM_3WAY: int +STEREO_SGBM_MODE_SGBM_3WAY: int StereoSGBM_MODE_HH4: int +STEREO_SGBM_MODE_HH4: int HOGDescriptor_L2Hys: int -HOGDescriptor_HistogramNormType = int # One of [L2Hys] +HOGDESCRIPTOR_L2HYS: int +HOGDescriptor_HistogramNormType = int +"""One of [HOGDescriptor_L2Hys, HOGDESCRIPTOR_L2HYS]""" HOGDescriptor_DEFAULT_NLEVELS: int +HOGDESCRIPTOR_DEFAULT_NLEVELS: int HOGDescriptor_DESCR_FORMAT_COL_BY_COL: int +HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL: int HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW: int -HOGDescriptor_DescriptorStorageFormat = int # One of [DESCR_FORMAT_COL_BY_COL, 
DESCR_FORMAT_ROW_BY_ROW] +HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW: int +HOGDescriptor_DescriptorStorageFormat = int +"""One of [HOGDescriptor_DESCR_FORMAT_COL_BY_COL, HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL, +HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW, HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW]""" QRCodeEncoder_MODE_AUTO: int +QRCODE_ENCODER_MODE_AUTO: int QRCodeEncoder_MODE_NUMERIC: int +QRCODE_ENCODER_MODE_NUMERIC: int QRCodeEncoder_MODE_ALPHANUMERIC: int +QRCODE_ENCODER_MODE_ALPHANUMERIC: int QRCodeEncoder_MODE_BYTE: int +QRCODE_ENCODER_MODE_BYTE: int QRCodeEncoder_MODE_ECI: int +QRCODE_ENCODER_MODE_ECI: int QRCodeEncoder_MODE_KANJI: int +QRCODE_ENCODER_MODE_KANJI: int QRCodeEncoder_MODE_STRUCTURED_APPEND: int -# One of [MODE_AUTO, MODE_NUMERIC, MODE_ALPHANUMERIC, MODE_BYTE, MODE_ECI, MODE_KANJI, MODE_STRUCTURED_APPEND] +QRCODE_ENCODER_MODE_STRUCTURED_APPEND: int QRCodeEncoder_EncodeMode = int +"""One of [QRCodeEncoder_MODE_AUTO, QRCODE_ENCODER_MODE_AUTO, QRCodeEncoder_MODE_NUMERIC, QRCODE_ENCODER_MODE_NUMERIC, +QRCodeEncoder_MODE_ALPHANUMERIC, QRCODE_ENCODER_MODE_ALPHANUMERIC, QRCodeEncoder_MODE_BYTE, QRCODE_ENCODER_MODE_BYTE, +QRCodeEncoder_MODE_ECI, QRCODE_ENCODER_MODE_ECI, QRCodeEncoder_MODE_KANJI, QRCODE_ENCODER_MODE_KANJI, + QRCodeEncoder_MODE_STRUCTURED_APPEND, QRCODE_ENCODER_MODE_STRUCTURED_APPEND]""" QRCodeEncoder_CORRECT_LEVEL_L: int +QRCODE_ENCODER_CORRECT_LEVEL_L: int QRCodeEncoder_CORRECT_LEVEL_M: int +QRCODE_ENCODER_CORRECT_LEVEL_M: int QRCodeEncoder_CORRECT_LEVEL_Q: int +QRCODE_ENCODER_CORRECT_LEVEL_Q: int QRCodeEncoder_CORRECT_LEVEL_H: int -QRCodeEncoder_CorrectionLevel = int # One of [CORRECT_LEVEL_L, CORRECT_LEVEL_M, CORRECT_LEVEL_Q, CORRECT_LEVEL_H] +QRCODE_ENCODER_CORRECT_LEVEL_H: int +QRCodeEncoder_CorrectionLevel = int +"""One of [QRCodeEncoder_CORRECT_LEVEL_L, QRCODE_ENCODER_CORRECT_LEVEL_L, QRCodeEncoder_CORRECT_LEVEL_M, +QRCODE_ENCODER_CORRECT_LEVEL_M, QRCodeEncoder_CORRECT_LEVEL_Q, QRCODE_ENCODER_CORRECT_LEVEL_Q, +QRCodeEncoder_CORRECT_LEVEL_H, 
QRCODE_ENCODER_CORRECT_LEVEL_H]""" QRCodeEncoder_ECI_UTF8: int -QRCodeEncoder_ECIEncodings = int # One of [ECI_UTF8] +QRCODE_ENCODER_ECI_UTF8: int +QRCodeEncoder_ECIEncodings = int +"""One of [QRCodeEncoder_ECI_UTF8, QRCODE_ENCODER_ECI_UTF8]""" FaceRecognizerSF_FR_COSINE: int +FACE_RECOGNIZER_SF_FR_COSINE: int FaceRecognizerSF_FR_NORM_L2: int -FaceRecognizerSF_DisType = int # One of [FR_COSINE, FR_NORM_L2] +FACE_RECOGNIZER_SF_FR_NORM_L2: int +FaceRecognizerSF_DisType = int +"""One of [FaceRecognizerSF_FR_COSINE, FACE_RECOGNIZER_SF_FR_COSINE, FaceRecognizerSF_FR_NORM_L2, +FACE_RECOGNIZER_SF_FR_NORM_L2]""" Stitcher_OK: int +STITCHER_OK: int Stitcher_ERR_NEED_MORE_IMGS: int +STITCHER_ERR_NEED_MORE_IMGS: int Stitcher_ERR_HOMOGRAPHY_EST_FAIL: int +STITCHER_ERR_HOMOGRAPHY_EST_FAIL: int Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: int -Stitcher_Status = int # One of [OK, ERR_NEED_MORE_IMGS, ERR_HOMOGRAPHY_EST_FAIL, ERR_CAMERA_PARAMS_ADJUST_FAIL] +STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL: int +Stitcher_Status = int +"""One of [Stitcher_OK, STITCHER_OK, Stitcher_ERR_NEED_MORE_IMGS, STITCHER_ERR_NEED_MORE_IMGS, +Stitcher_ERR_HOMOGRAPHY_EST_FAIL, STITCHER_ERR_HOMOGRAPHY_EST_FAIL, Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL, +STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL]""" Stitcher_PANORAMA: int +STITCHER_PANORAMA: int Stitcher_SCANS: int -Stitcher_Mode = int # One of [PANORAMA, SCANS] +STITCHER_SCANS: int +Stitcher_Mode = int +"""One of [Stitcher_PANORAMA, STITCHER_PANORAMA, Stitcher_SCANS, STITCHER_SCANS]""" DISOpticalFlow_PRESET_ULTRAFAST: int +DISOPTICAL_FLOW_PRESET_ULTRAFAST: int DISOpticalFlow_PRESET_FAST: int +DISOPTICAL_FLOW_PRESET_FAST: int DISOpticalFlow_PRESET_MEDIUM: int +DISOPTICAL_FLOW_PRESET_MEDIUM: int PCA_DATA_AS_ROW: int PCA_DATA_AS_COL: int PCA_USE_AVG: int -PCA_Flags = int # One of [DATA_AS_ROW, DATA_AS_COL, USE_AVG] +PCA_Flags = int +"""One of [PCA_DATA_AS_ROW, PCA_DATA_AS_COL, PCA_USE_AVG]""" SVD_MODIFY_A: int SVD_NO_UV: int SVD_FULL_UV: int -SVD_Flags = int # One of 
[MODIFY_A, NO_UV, FULL_UV] +SVD_Flags = int +"""One of [SVD_MODIFY_A, SVD_NO_UV, SVD_FULL_UV]""" RNG_UNIFORM: int RNG_NORMAL: int Formatter_FMT_DEFAULT: int +FORMATTER_FMT_DEFAULT: int Formatter_FMT_MATLAB: int +FORMATTER_FMT_MATLAB: int Formatter_FMT_CSV: int +FORMATTER_FMT_CSV: int Formatter_FMT_PYTHON: int +FORMATTER_FMT_PYTHON: int Formatter_FMT_NUMPY: int +FORMATTER_FMT_NUMPY: int Formatter_FMT_C: int -Formatter_FormatType = int # One of [FMT_DEFAULT, FMT_MATLAB, FMT_CSV, FMT_PYTHON, FMT_NUMPY, FMT_C] +FORMATTER_FMT_C: int +Formatter_FormatType = int +"""One of [Formatter_FMT_DEFAULT, FORMATTER_FMT_DEFAULT, Formatter_FMT_MATLAB, FORMATTER_FMT_MATLAB, Formatter_FMT_CSV, +FORMATTER_FMT_CSV, Formatter_FMT_PYTHON, FORMATTER_FMT_PYTHON, Formatter_FMT_NUMPY, FORMATTER_FMT_NUMPY, +Formatter_FMT_C, FORMATTER_FMT_C]""" _InputArray_KIND_SHIFT: int +_INPUT_ARRAY_KIND_SHIFT: int _InputArray_FIXED_TYPE: int +_INPUT_ARRAY_FIXED_TYPE: int _InputArray_FIXED_SIZE: int +_INPUT_ARRAY_FIXED_SIZE: int _InputArray_KIND_MASK: int +_INPUT_ARRAY_KIND_MASK: int _InputArray_NONE: int +_INPUT_ARRAY_NONE: int _InputArray_MAT: int +_INPUT_ARRAY_MAT: int _InputArray_MATX: int +_INPUT_ARRAY_MATX: int _InputArray_STD_VECTOR: int +_INPUT_ARRAY_STD_VECTOR: int _InputArray_STD_VECTOR_VECTOR: int +_INPUT_ARRAY_STD_VECTOR_VECTOR: int _InputArray_STD_VECTOR_MAT: int +_INPUT_ARRAY_STD_VECTOR_MAT: int _InputArray_EXPR: int +_INPUT_ARRAY_EXPR: int _InputArray_OPENGL_BUFFER: int +_INPUT_ARRAY_OPENGL_BUFFER: int _InputArray_CUDA_HOST_MEM: int +_INPUT_ARRAY_CUDA_HOST_MEM: int _InputArray_CUDA_GPU_MAT: int +_INPUT_ARRAY_CUDA_GPU_MAT: int _InputArray_UMAT: int +_INPUT_ARRAY_UMAT: int _InputArray_STD_VECTOR_UMAT: int +_INPUT_ARRAY_STD_VECTOR_UMAT: int _InputArray_STD_BOOL_VECTOR: int +_INPUT_ARRAY_STD_BOOL_VECTOR: int _InputArray_STD_VECTOR_CUDA_GPU_MAT: int +_INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT: int _InputArray_STD_ARRAY: int +_INPUT_ARRAY_STD_ARRAY: int _InputArray_STD_ARRAY_MAT: int -# One of 
[KIND_SHIFT, FIXED_TYPE, FIXED_SIZE, KIND_MASK, NONE, MAT, MATX, -# STD_VECTOR, STD_VECTOR_VECTOR, STD_VECTOR_MAT, EXPR, OPENGL_BUFFER, -# CUDA_HOST_MEM, CUDA_GPU_MAT, UMAT, STD_VECTOR_UMAT, STD_BOOL_VECTOR, -# STD_VECTOR_CUDA_GPU_MAT, STD_ARRAY, STD_ARRAY_MAT] +_INPUT_ARRAY_STD_ARRAY_MAT: int _InputArray_KindFlag = int +"""One of [_InputArray_KIND_SHIFT, _INPUT_ARRAY_KIND_SHIFT, _InputArray_FIXED_TYPE, _INPUT_ARRAY_FIXED_TYPE, +_InputArray_FIXED_SIZE, _INPUT_ARRAY_FIXED_SIZE, _InputArray_KIND_MASK, _INPUT_ARRAY_KIND_MASK, _InputArray_NONE, +_INPUT_ARRAY_NONE, _InputArray_MAT, _INPUT_ARRAY_MAT, _InputArray_MATX, _INPUT_ARRAY_MATX, _InputArray_STD_VECTOR, +_INPUT_ARRAY_STD_VECTOR, _InputArray_STD_VECTOR_VECTOR, _INPUT_ARRAY_STD_VECTOR_VECTOR, _InputArray_STD_VECTOR_MAT, +_INPUT_ARRAY_STD_VECTOR_MAT, _InputArray_EXPR, _INPUT_ARRAY_EXPR, _InputArray_OPENGL_BUFFER, _INPUT_ARRAY_OPENGL_BUFFER, +_InputArray_CUDA_HOST_MEM, _INPUT_ARRAY_CUDA_HOST_MEM, _InputArray_CUDA_GPU_MAT, _INPUT_ARRAY_CUDA_GPU_MAT, +_InputArray_UMAT, _INPUT_ARRAY_UMAT, _InputArray_STD_VECTOR_UMAT, _INPUT_ARRAY_STD_VECTOR_UMAT, +_InputArray_STD_BOOL_VECTOR, _INPUT_ARRAY_STD_BOOL_VECTOR, _InputArray_STD_VECTOR_CUDA_GPU_MAT, +_INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT, _InputArray_STD_ARRAY, _INPUT_ARRAY_STD_ARRAY, _InputArray_STD_ARRAY_MAT, +_INPUT_ARRAY_STD_ARRAY_MAT]""" _OutputArray_DEPTH_MASK_8U: int +_OUTPUT_ARRAY_DEPTH_MASK_8U: int _OutputArray_DEPTH_MASK_8S: int +_OUTPUT_ARRAY_DEPTH_MASK_8S: int _OutputArray_DEPTH_MASK_16U: int +_OUTPUT_ARRAY_DEPTH_MASK_16U: int _OutputArray_DEPTH_MASK_16S: int +_OUTPUT_ARRAY_DEPTH_MASK_16S: int _OutputArray_DEPTH_MASK_32S: int +_OUTPUT_ARRAY_DEPTH_MASK_32S: int _OutputArray_DEPTH_MASK_32F: int +_OUTPUT_ARRAY_DEPTH_MASK_32F: int _OutputArray_DEPTH_MASK_64F: int +_OUTPUT_ARRAY_DEPTH_MASK_64F: int _OutputArray_DEPTH_MASK_16F: int +_OUTPUT_ARRAY_DEPTH_MASK_16F: int _OutputArray_DEPTH_MASK_ALL: int +_OUTPUT_ARRAY_DEPTH_MASK_ALL: int _OutputArray_DEPTH_MASK_ALL_BUT_8S: int 
+_OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S: int _OutputArray_DEPTH_MASK_ALL_16F: int +_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F: int _OutputArray_DEPTH_MASK_FLT: int -# One of [DEPTH_MASK_8U, DEPTH_MASK_8S, DEPTH_MASK_16U, DEPTH_MASK_16S, -# DEPTH_MASK_32S, DEPTH_MASK_32F, DEPTH_MASK_64F, DEPTH_MASK_16F, -# DEPTH_MASK_ALL, DEPTH_MASK_ALL_BUT_8S, DEPTH_MASK_ALL_16F, -# DEPTH_MASK_FLT] +_OUTPUT_ARRAY_DEPTH_MASK_FLT: int _OutputArray_DepthMask = int +"""One of [_OutputArray_DEPTH_MASK_8U, _OUTPUT_ARRAY_DEPTH_MASK_8U, _OutputArray_DEPTH_MASK_8S, +_OUTPUT_ARRAY_DEPTH_MASK_8S, _OutputArray_DEPTH_MASK_16U, _OUTPUT_ARRAY_DEPTH_MASK_16U, _OutputArray_DEPTH_MASK_16S, +_OUTPUT_ARRAY_DEPTH_MASK_16S, _OutputArray_DEPTH_MASK_32S, _OUTPUT_ARRAY_DEPTH_MASK_32S, _OutputArray_DEPTH_MASK_32F, +_OUTPUT_ARRAY_DEPTH_MASK_32F, _OutputArray_DEPTH_MASK_64F, _OUTPUT_ARRAY_DEPTH_MASK_64F, _OutputArray_DEPTH_MASK_16F, +_OUTPUT_ARRAY_DEPTH_MASK_16F, _OutputArray_DEPTH_MASK_ALL, _OUTPUT_ARRAY_DEPTH_MASK_ALL, +_OutputArray_DEPTH_MASK_ALL_BUT_8S, _OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S, _OutputArray_DEPTH_MASK_ALL_16F, +_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F, _OutputArray_DEPTH_MASK_FLT, _OUTPUT_ARRAY_DEPTH_MASK_FLT]""" UMatData_COPY_ON_MAP: int +UMAT_DATA_COPY_ON_MAP: int UMatData_HOST_COPY_OBSOLETE: int +UMAT_DATA_HOST_COPY_OBSOLETE: int UMatData_DEVICE_COPY_OBSOLETE: int +UMAT_DATA_DEVICE_COPY_OBSOLETE: int UMatData_TEMP_UMAT: int +UMAT_DATA_TEMP_UMAT: int UMatData_TEMP_COPIED_UMAT: int +UMAT_DATA_TEMP_COPIED_UMAT: int UMatData_USER_ALLOCATED: int +UMAT_DATA_USER_ALLOCATED: int UMatData_DEVICE_MEM_MAPPED: int +UMAT_DATA_DEVICE_MEM_MAPPED: int UMatData_ASYNC_CLEANUP: int -# One of [COPY_ON_MAP, HOST_COPY_OBSOLETE, DEVICE_COPY_OBSOLETE, -# TEMP_UMAT, TEMP_COPIED_UMAT, USER_ALLOCATED, DEVICE_MEM_MAPPED, -# ASYNC_CLEANUP] +UMAT_DATA_ASYNC_CLEANUP: int UMatData_MemoryFlag = int +"""One of [UMatData_COPY_ON_MAP, UMAT_DATA_COPY_ON_MAP, UMatData_HOST_COPY_OBSOLETE, UMAT_DATA_HOST_COPY_OBSOLETE, +UMatData_DEVICE_COPY_OBSOLETE, 
UMAT_DATA_DEVICE_COPY_OBSOLETE, UMatData_TEMP_UMAT, UMAT_DATA_TEMP_UMAT, +UMatData_TEMP_COPIED_UMAT, UMAT_DATA_TEMP_COPIED_UMAT, UMatData_USER_ALLOCATED, UMAT_DATA_USER_ALLOCATED, +UMatData_DEVICE_MEM_MAPPED, UMAT_DATA_DEVICE_MEM_MAPPED, UMatData_ASYNC_CLEANUP, UMAT_DATA_ASYNC_CLEANUP]""" Mat_MAGIC_VAL: int +MAT_MAGIC_VAL: int Mat_AUTO_STEP: int +MAT_AUTO_STEP: int Mat_CONTINUOUS_FLAG: int +MAT_CONTINUOUS_FLAG: int Mat_SUBMATRIX_FLAG: int +MAT_SUBMATRIX_FLAG: int Mat_MAGIC_MASK: int +MAT_MAGIC_MASK: int Mat_TYPE_MASK: int +MAT_TYPE_MASK: int Mat_DEPTH_MASK: int +MAT_DEPTH_MASK: int SparseMat_MAGIC_VAL: int +SPARSE_MAT_MAGIC_VAL: int SparseMat_MAX_DIM: int +SPARSE_MAT_MAX_DIM: int SparseMat_HASH_SCALE: int +SPARSE_MAT_HASH_SCALE: int SparseMat_HASH_BIT: int +SPARSE_MAT_HASH_BIT: int QuatEnum_INT_XYZ: int +QUAT_ENUM_INT_XYZ: int QuatEnum_INT_XZY: int +QUAT_ENUM_INT_XZY: int QuatEnum_INT_YXZ: int +QUAT_ENUM_INT_YXZ: int QuatEnum_INT_YZX: int +QUAT_ENUM_INT_YZX: int QuatEnum_INT_ZXY: int +QUAT_ENUM_INT_ZXY: int QuatEnum_INT_ZYX: int +QUAT_ENUM_INT_ZYX: int QuatEnum_INT_XYX: int +QUAT_ENUM_INT_XYX: int QuatEnum_INT_XZX: int +QUAT_ENUM_INT_XZX: int QuatEnum_INT_YXY: int +QUAT_ENUM_INT_YXY: int QuatEnum_INT_YZY: int +QUAT_ENUM_INT_YZY: int QuatEnum_INT_ZXZ: int +QUAT_ENUM_INT_ZXZ: int QuatEnum_INT_ZYZ: int +QUAT_ENUM_INT_ZYZ: int QuatEnum_EXT_XYZ: int +QUAT_ENUM_EXT_XYZ: int QuatEnum_EXT_XZY: int +QUAT_ENUM_EXT_XZY: int QuatEnum_EXT_YXZ: int +QUAT_ENUM_EXT_YXZ: int QuatEnum_EXT_YZX: int +QUAT_ENUM_EXT_YZX: int QuatEnum_EXT_ZXY: int +QUAT_ENUM_EXT_ZXY: int QuatEnum_EXT_ZYX: int +QUAT_ENUM_EXT_ZYX: int QuatEnum_EXT_XYX: int +QUAT_ENUM_EXT_XYX: int QuatEnum_EXT_XZX: int +QUAT_ENUM_EXT_XZX: int QuatEnum_EXT_YXY: int +QUAT_ENUM_EXT_YXY: int QuatEnum_EXT_YZY: int +QUAT_ENUM_EXT_YZY: int QuatEnum_EXT_ZXZ: int +QUAT_ENUM_EXT_ZXZ: int QuatEnum_EXT_ZYZ: int +QUAT_ENUM_EXT_ZYZ: int QuatEnum_EULER_ANGLES_MAX_VALUE: int -# One of [INT_XYZ, INT_XZY, INT_YXZ, INT_YZX, INT_ZXY, INT_ZYX, 
INT_XYX, -# INT_XZX, INT_YXY, INT_YZY, INT_ZXZ, INT_ZYZ, EXT_XYZ, EXT_XZY, EXT_YXZ, -# EXT_YZX, EXT_ZXY, EXT_ZYX, EXT_XYX, EXT_XZX, EXT_YXY, EXT_YZY, EXT_ZXZ, -# EXT_ZYZ, EULER_ANGLES_MAX_VALUE] +QUAT_ENUM_EULER_ANGLES_MAX_VALUE: int QuatEnum_EulerAnglesType = int +"""One of [QuatEnum_INT_XYZ, QUAT_ENUM_INT_XYZ, QuatEnum_INT_XZY, QUAT_ENUM_INT_XZY, QuatEnum_INT_YXZ, +QUAT_ENUM_INT_YXZ, QuatEnum_INT_YZX, QUAT_ENUM_INT_YZX, QuatEnum_INT_ZXY, QUAT_ENUM_INT_ZXY, QuatEnum_INT_ZYX, +QUAT_ENUM_INT_ZYX, QuatEnum_INT_XYX, QUAT_ENUM_INT_XYX, QuatEnum_INT_XZX, QUAT_ENUM_INT_XZX, QuatEnum_INT_YXY, +QUAT_ENUM_INT_YXY, QuatEnum_INT_YZY, QUAT_ENUM_INT_YZY, QuatEnum_INT_ZXZ, QUAT_ENUM_INT_ZXZ, QuatEnum_INT_ZYZ, +QUAT_ENUM_INT_ZYZ, QuatEnum_EXT_XYZ, QUAT_ENUM_EXT_XYZ, QuatEnum_EXT_XZY, QUAT_ENUM_EXT_XZY, QuatEnum_EXT_YXZ, +QUAT_ENUM_EXT_YXZ, QuatEnum_EXT_YZX, QUAT_ENUM_EXT_YZX, QuatEnum_EXT_ZXY, QUAT_ENUM_EXT_ZXY, QuatEnum_EXT_ZYX, +QUAT_ENUM_EXT_ZYX, QuatEnum_EXT_XYX, QUAT_ENUM_EXT_XYX, QuatEnum_EXT_XZX, QUAT_ENUM_EXT_XZX, QuatEnum_EXT_YXY, +QUAT_ENUM_EXT_YXY, QuatEnum_EXT_YZY, QUAT_ENUM_EXT_YZY, QuatEnum_EXT_ZXZ, QUAT_ENUM_EXT_ZXZ, QuatEnum_EXT_ZYZ, +QUAT_ENUM_EXT_ZYZ, QuatEnum_EULER_ANGLES_MAX_VALUE, QUAT_ENUM_EULER_ANGLES_MAX_VALUE]""" TermCriteria_COUNT: int +TERM_CRITERIA_COUNT: int TermCriteria_MAX_ITER: int +TERM_CRITERIA_MAX_ITER: int TermCriteria_EPS: int -TermCriteria_Type = int # One of [COUNT, MAX_ITER, EPS] +TERM_CRITERIA_EPS: int +TermCriteria_Type = int +"""One of [TermCriteria_COUNT, TERM_CRITERIA_COUNT, TermCriteria_MAX_ITER, TERM_CRITERIA_MAX_ITER, TermCriteria_EPS, +TERM_CRITERIA_EPS]""" GFluidKernel_Kind_Filter: int +GFLUID_KERNEL_KIND_FILTER: int GFluidKernel_Kind_Resize: int +GFLUID_KERNEL_KIND_RESIZE: int GFluidKernel_Kind_YUV420toRGB: int -GFluidKernel_Kind = int # One of [Filter, Resize, YUV420toRGB] +GFLUID_KERNEL_KIND_YUV420TO_RGB: int +GFluidKernel_Kind = int +"""One of [GFluidKernel_Kind_Filter, GFLUID_KERNEL_KIND_FILTER, GFluidKernel_Kind_Resize, 
GFLUID_KERNEL_KIND_RESIZE, +GFluidKernel_Kind_YUV420toRGB, GFLUID_KERNEL_KIND_YUV420TO_RGB]""" MediaFrame_Access_R: int +MEDIA_FRAME_ACCESS_R: int MediaFrame_Access_W: int -MediaFrame_Access = int # One of [R, W] +MEDIA_FRAME_ACCESS_W: int +MediaFrame_Access = int +"""One of [MediaFrame_Access_R, MEDIA_FRAME_ACCESS_R, MediaFrame_Access_W, MEDIA_FRAME_ACCESS_W]""" RMat_Access_R: int +RMAT_ACCESS_R: int RMat_Access_W: int -RMat_Access = int # One of [R, W] +RMAT_ACCESS_W: int +RMat_Access = int +"""One of [RMat_Access_R, RMAT_ACCESS_R, RMat_Access_W, RMAT_ACCESS_W]""" # Classes @@ -1850,6 +2302,24 @@ class FileNode: def mat(self) -> cv2.typing.MatLike: ... +class RotatedRect: + center: cv2.typing.Point2f + size: cv2.typing.Size2f + angle: float + + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, center: cv2.typing.Point2f, size: cv2.typing.Size2f, angle: float) -> None: ... + @typing.overload + def __init__(self, point1: cv2.typing.Point2f, point2: cv2.typing.Point2f, point3: cv2.typing.Point2f) -> None: ... + + def points(self) -> typing.Sequence[cv2.typing.Point2f]: ... + + def boundingRect(self) -> cv2.typing.Rect: ... + + class KeyPoint: pt: cv2.typing.Point2f size: float @@ -2333,6 +2803,8 @@ class UsacParams: sampler: SamplingMethod score: ScoreMethod threshold: float + final_polisher: PolishingMethod + final_polisher_iterations: int # Functions def __init__(self) -> None: ... @@ -2666,16 +3138,8 @@ class QRCodeEncoder: ) -> typing.Sequence[UMat]: ... -class QRCodeDetector: +class GraphicalCodeDetector: # Functions - def __init__(self) -> None: ... - - def setEpsX(self, epsX: float) -> None: ... - - def setEpsY(self, epsY: float) -> None: ... - - def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> None: ... 
- @typing.overload def detect( self, @@ -2694,35 +3158,21 @@ class QRCodeDetector: self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, - straight_qrcode: cv2.typing.MatLike | None = ..., - ) -> tuple[ - str, - cv2.typing.MatLike, - ]: ... - - @typing.overload - def decode(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... - - @typing.overload - def decodeCurved( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike, - straight_qrcode: cv2.typing.MatLike | None = ..., + straight_code: cv2.typing.MatLike | None = ..., ) -> tuple[ str, cv2.typing.MatLike, ]: ... @typing.overload - def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... + def decode(self, img: UMat, points: UMat, straight_code: UMat | None = ...) -> tuple[str, UMat]: ... @typing.overload def detectAndDecode( self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., - straight_qrcode: cv2.typing.MatLike | None = ..., + straight_code: cv2.typing.MatLike | None = ..., ) -> tuple[ str, cv2.typing.MatLike, @@ -2734,31 +3184,7 @@ class QRCodeDetector: self, img: UMat, points: UMat | None = ..., - straight_qrcode: UMat | None = ..., - ) -> tuple[ - str, - UMat, - UMat, - ]: ... - - @typing.overload - def detectAndDecodeCurved( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike | None = ..., - straight_qrcode: cv2.typing.MatLike | None = ..., - ) -> tuple[ - str, - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... 
- - @typing.overload - def detectAndDecodeCurved( - self, - img: UMat, - points: UMat | None = ..., - straight_qrcode: UMat | None = ..., + straight_code: UMat | None = ..., ) -> tuple[ str, UMat, @@ -2783,7 +3209,7 @@ class QRCodeDetector: self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, - straight_qrcode: typing.Sequence[cv2.typing.MatLike] | None = ..., + straight_code: typing.Sequence[cv2.typing.MatLike] | None = ..., ) -> tuple[ bool, typing.Sequence[str], @@ -2795,7 +3221,7 @@ class QRCodeDetector: self, img: UMat, points: UMat, - straight_qrcode: typing.Sequence[UMat] | None = ..., + straight_code: typing.Sequence[UMat] | None = ..., ) -> tuple[ bool, typing.Sequence[str], @@ -2807,7 +3233,7 @@ class QRCodeDetector: self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., - straight_qrcode: typing.Sequence[cv2.typing.MatLike] | None = ..., + straight_code: typing.Sequence[cv2.typing.MatLike] | None = ..., ) -> tuple[ bool, typing.Sequence[str], @@ -2820,7 +3246,7 @@ class QRCodeDetector: self, img: UMat, points: UMat | None = ..., - straight_qrcode: typing.Sequence[UMat] | None = ..., + straight_code: typing.Sequence[UMat] | None = ..., ) -> tuple[ bool, typing.Sequence[str], @@ -4222,6 +4648,85 @@ class BaseCascadeClassifier(Algorithm): ... +class QRCodeDetector(GraphicalCodeDetector): + # Functions + def __init__(self) -> None: ... + + def setEpsX(self, epsX: float) -> QRCodeDetector: ... + + def setEpsY(self, epsY: float) -> QRCodeDetector: ... + + def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> QRCodeDetector: ... + + @typing.overload + def decodeCurved( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike, + straight_qrcode: cv2.typing.MatLike | None = ..., + ) -> tuple[ + str, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... 
+ + @typing.overload + def detectAndDecodeCurved( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike | None = ..., + straight_qrcode: cv2.typing.MatLike | None = ..., + ) -> tuple[ + str, + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detectAndDecodeCurved( + self, + img: UMat, + points: UMat | None = ..., + straight_qrcode: UMat | None = ..., + ) -> tuple[ + str, + UMat, + UMat, + ]: ... + + +class QRCodeDetectorAruco(GraphicalCodeDetector): + # Classes + class Params: + minModuleSizeInPyramid: float + maxRotation: float + maxModuleSizeMismatch: float + maxTimingPatternMismatch: float + maxPenalties: float + maxColorsMismatch: float + scaleTimingPatternScore: float + + # Functions + def __init__(self) -> None: ... + + # Functions + + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, params: QRCodeDetectorAruco.Params) -> None: ... + + def getDetectorParameters(self) -> QRCodeDetectorAruco.Params: ... + + def setDetectorParameters(self, params: QRCodeDetectorAruco.Params) -> QRCodeDetectorAruco: ... + + def getArucoParameters(self) -> cv2.aruco.DetectorParameters: ... + + def setArucoParameters(self, params: cv2.aruco.DetectorParameters) -> None: ... + + class BackgroundSubtractor(Algorithm): # Functions @typing.overload @@ -4356,6 +4861,15 @@ class TrackerNano(Tracker): def getTrackingScore(self) -> float: ... +class error(Exception): + code: int + err: str + file: str + func: str + line: int + msg: str + + class GeneralizedHoughBallard(GeneralizedHough): # Functions def setLevels(self, levels: int) -> None: ... @@ -9089,6 +9603,12 @@ def groupRectangles( ]: ... +@typing.overload +def hasNonZero(src: cv2.typing.MatLike) -> bool: ... +@typing.overload +def hasNonZero(src: UMat) -> bool: ... + + def haveImageReader(filename: str) -> bool: ... @@ -10760,6 +11280,22 @@ def solveCubic( def solveCubic(coeffs: UMat, roots: UMat | None = ...) -> tuple[int, UMat]: ... 
+@typing.overload +def solveLP( + Func: cv2.typing.MatLike, + Constr: cv2.typing.MatLike, + constr_eps: float, + z: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solveLP(Func: UMat, Constr: UMat, constr_eps: float, z: UMat | None = ...) -> tuple[int, UMat]: ... + + @typing.overload def solveLP( Func: cv2.typing.MatLike, diff --git a/typings/cv2/gapi/__init__.pyi b/typings/cv2/gapi/__init__.pyi deleted file mode 100644 index 7b58de64..00000000 --- a/typings/cv2/gapi/__init__.pyi +++ /dev/null @@ -1,91 +0,0 @@ -class GOpaque: - def __new__(cls, argtype): ... - - class Bool: - def __new__(self): ... - - class Int: - def __new__(self): ... - - class Double: - def __new__(self): ... - - class Float: - def __new__(self): ... - - class String: - def __new__(self): ... - - class Point: - def __new__(self): ... - - class Point2f: - def __new__(self): ... - - class Point3f: - def __new__(self): ... - - class Size: - def __new__(self): ... - - class Rect: - def __new__(self): ... - - class Prim: - def __new__(self): ... - - class Any: - def __new__(self): ... - - -class GArray: - def __new__(cls, argtype): ... - - class Bool: - def __new__(self): ... - - class Int: - def __new__(self): ... - - class Double: - def __new__(self): ... - - class Float: - def __new__(self): ... - - class String: - def __new__(self): ... - - class Point: - def __new__(self): ... - - class Point2f: - def __new__(self): ... - - class Point3f: - def __new__(self): ... - - class Size: - def __new__(self): ... - - class Rect: - def __new__(self): ... - - class Scalar: - def __new__(self): ... - - class MatLike: - def __new__(self): ... - - class GMat: - def __new__(self): ... - - class Prim: - def __new__(self): ... - - class Any: - def __new__(self): ... - - -def op(op_id, in_types, out_types): ... -def kernel(op_cls): ... 
diff --git a/typings/cv2/gapi/streaming.pyi b/typings/cv2/gapi/streaming.pyi deleted file mode 100644 index 2c49b006..00000000 --- a/typings/cv2/gapi/streaming.pyi +++ /dev/null @@ -1,16 +0,0 @@ -from cv2.cv2 import GMat, GOpaqueT, gapi_streaming_queue_capacity -from typing_extensions import TypeAlias - -SYNC_POLICY_DONT_SYNC: int -SYNC_POLICY_DROP: int -sync_policy_dont_sync: int -sync_policy_drop: int - -queue_capacity: TypeAlias = gapi_streaming_queue_capacity - - -def desync(g: GMat) -> GMat: ... -def seqNo(arg1: GMat) -> GOpaqueT: ... -def seq_id(arg1: GMat) -> GOpaqueT: ... -def size(src: GMat) -> GOpaqueT: ... -def timestamp(arg1: GMat) -> GOpaqueT: ... diff --git a/typings/cv2/mat_wrapper/__init__.pyi b/typings/cv2/mat_wrapper/__init__.pyi index a3acfde5..db0f36e1 100644 --- a/typings/cv2/mat_wrapper/__init__.pyi +++ b/typings/cv2/mat_wrapper/__init__.pyi @@ -1,13 +1,12 @@ +import numpy as np from _typeshed import Unused -from cv2.typing import _NDArray +from typing_extensions import TypeAlias __all__: list[str] = [] +_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]] -# TODO: Make MatLike generic with int or float - - -class MatLike(_NDArray): +class Mat(_NDArray): wrap_channels: bool | None def __new__(cls, arr: _NDArray, wrap_channels: bool = ..., **kwargs: Unused) -> _NDArray: ... 
diff --git a/typings/cv2/typing.pyi b/typings/cv2/typing.pyi deleted file mode 100644 index 7f0cbd33..00000000 --- a/typings/cv2/typing.pyi +++ /dev/null @@ -1,34 +0,0 @@ -from collections.abc import Sequence - -import numpy as np -from cv2.mat_wrapper import MatLike as WrappedMat -from typing_extensions import TypeAlias - -_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]] -MatLike: TypeAlias = WrappedMat | _NDArray - -# Convertable to boolean -Boolean: TypeAlias = bool | int | None -# "a scalar" -NumericScalar: TypeAlias = float | bool | None -# cv::Scalar -Scalar: TypeAlias = MatLike | NumericScalar | Sequence[NumericScalar] -# cv::TermCriteria -TermCriteria: TypeAlias = tuple[int, int, float] | Sequence[float] -# cv::Point -Point: TypeAlias = tuple[int, int] | Sequence[int] -# cv::Size -Size: TypeAlias = tuple[int, int] | Sequence[int] -# cv::Range -Range: TypeAlias = tuple[int, int] | Sequence[int] -# cv::Point -Point2f: TypeAlias = tuple[float, float] | Sequence[float] -# cv::Size -SizeFloat: TypeAlias = tuple[float, float] | Sequence[float] -# cv::Rect -Rect: TypeAlias = tuple[int, int, int, int] | Sequence[int] -# cv::Rect -RectFloat: TypeAlias = tuple[int, int, int, int] | Sequence[int] -# cv::RotatedRect -RotatedRect: TypeAlias = tuple[Point2f, SizeFloat, float] | Sequence[Point2f | SizeFloat | float] -RotatedRectResult: TypeAlias = tuple[tuple[float, float], tuple[float, float], float] diff --git a/typings/multiprocessing/test_cases/check_pipe_connections.py b/typings/multiprocessing/test_cases/check_pipe_connections.py index 5c55de0a..eee9476b 100644 --- a/typings/multiprocessing/test_cases/check_pipe_connections.py +++ b/typings/multiprocessing/test_cases/check_pipe_connections.py @@ -3,6 +3,7 @@ from multiprocessing.connection import Pipe, PipeConnection # Less type-safe, but no extra variable. User could mix up send and recv types. 
+# This should be improvable with PEP 695: Type Parameter Syntax in Python 3.12 a: PipeConnection[str, int] b: PipeConnection[int, str] a, b = Pipe() @@ -16,10 +17,10 @@ a.send("test") a.send(0) # pyright: ignore[reportGeneralTypeIssues] -test: str = b.recv() +test1: str = b.recv() test2: int = b.recv() # pyright: ignore[reportGeneralTypeIssues] b.send("test") # pyright: ignore[reportGeneralTypeIssues] b.send(0) -test: str = a.recv() # pyright: ignore[reportGeneralTypeIssues] -test2: int = a.recv() +test3: str = a.recv() # pyright: ignore[reportGeneralTypeIssues] +test4: int = a.recv()