diff --git a/.editorconfig b/.editorconfig
index a9b080f1..cd5c1f1c 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -1,14 +1,14 @@
-root = true
-
-[*]
-end_of_line = crlf
-insert_final_newline = true
-charset = utf-8
-indent_style = space
-indent_size = 2
-
-[*.py]
-indent_size = 4
-
-[*.ui]
-indent_size = 1
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+charset = utf-8
+indent_style = space
+indent_size = 2
+
+[*.py]
+indent_size = 4
+
+[*.ui]
+indent_size = 1
diff --git a/.github/workflows/lint-and-build.yml b/.github/workflows/lint-and-build.yml
index 261c7caf..a5dcd70e 100644
--- a/.github/workflows/lint-and-build.yml
+++ b/.github/workflows/lint-and-build.yml
@@ -45,7 +45,7 @@ jobs:
fail-fast: false
# Ruff is version and platform sensible
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
steps:
- name: Checkout ${{ github.repository }}/${{ github.ref }}
uses: actions/checkout@v3
@@ -58,25 +58,13 @@ jobs:
- run: scripts/install.ps1
shell: pwsh
- run: ruff check .
- add-trailing-comma:
- runs-on: windows-latest
- steps:
- - name: Checkout ${{ github.repository }}/${{ github.ref }}
- uses: actions/checkout@v3
- - name: Set up Python 3.12
- uses: actions/setup-python@v4
- with:
- python-version: "3.12"
- - run: pip install add-trailing-comma
- - name: Analysing the code with add-trailing-comma
- run: add-trailing-comma $(git ls-files '**.py*')
Pyright:
runs-on: windows-latest
strategy:
fail-fast: false
# Pyright is version and platform sensible
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
steps:
- name: Checkout ${{ github.repository }}/${{ github.ref }}
uses: actions/checkout@v3
@@ -92,6 +80,7 @@ jobs:
uses: jakebailey/pyright-action@v1
with:
working-directory: src/
+ python-version: ${{ matrix.python-version }}
Build:
runs-on: windows-latest
strategy:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8020555f..5d64c44d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
- id: pretty-format-ini
args: [--autofix]
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: "v0.0.292" # Must match requirements-dev.txt
+ rev: "v0.1.1" # Must match requirements-dev.txt
hooks:
- id: ruff
args: [--fix]
diff --git a/.sonarcloud.properties b/.sonarcloud.properties
index ff239edf..4a69364d 100644
--- a/.sonarcloud.properties
+++ b/.sonarcloud.properties
@@ -1 +1 @@
-sonar.python.version=3.9, 3.10, 3.11
+sonar.python.version=3.10, 3.11
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 2ede6e77..e6590170 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -14,6 +14,7 @@
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true,
+ "files.eol": "\n",
"editor.comments.insertSpace": true,
"editor.insertSpaces": true,
"editor.detectIndentation": false,
@@ -62,7 +63,7 @@
"editor.defaultFormatter": "vscode.json-language-features",
},
"[python]": {
- // Ruff is not yet a formatter: https://github.com/charliermarsh/ruff/issues/1904
+ // Ruff as a formatter doesn't fully satisfy our needs yet: https://github.com/astral-sh/ruff/discussions/7310
"editor.defaultFormatter": "ms-python.autopep8",
"editor.tabSize": 4,
"editor.rulers": [
diff --git a/PyInstaller/hooks/hook-requests.py b/PyInstaller/hooks/hook-requests.py
index e1a554d0..13de4b6b 100644
--- a/PyInstaller/hooks/hook-requests.py
+++ b/PyInstaller/hooks/hook-requests.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from PyInstaller.utils.hooks import collect_data_files
# Get the cacert.pem
diff --git a/README.md b/README.md
index 25e54138..c8a4e856 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ This program can be used to automatically start, split, and reset your preferred
### Compatibility
- Windows 10 and 11.
-- Python 3.9+ (Not required for normal use. Refer to the [build instructions](/docs/build%20instructions.md) if you'd like run the application directly in Python).
+- Python 3.10+ (Not required for normal use. Refer to the [build instructions](/docs/build%20instructions.md) if you'd like to run the application directly in Python).
## OPTIONS
diff --git a/docs/build instructions.md b/docs/build instructions.md
index 49bc2004..3d700c87 100644
--- a/docs/build instructions.md
+++ b/docs/build instructions.md
@@ -8,7 +8,7 @@
### All platforms
-- [Python](https://www.python.org/downloads/) 3.9+.
+- [Python](https://www.python.org/downloads/) 3.10+.
- [Node](https://nodejs.org) is optional, but required for complete linting.
- Alternatively you can install the [pyright python wrapper](https://pypi.org/project/pyright/) which has a bit of an overhead delay.
- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell)
diff --git a/pyproject.toml b/pyproject.toml
index 06207457..6f7ea6ed 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,10 @@
-# https://beta.ruff.rs/docs/configuration
+# https://docs.astral.sh/ruff/configuration/
[tool.ruff]
-target-version = "py39"
+target-version = "py310"
line-length = 120
select = ["ALL"]
preview = true
-# https://beta.ruff.rs/docs/rules
+# https://docs.astral.sh/ruff/rules/
ignore = [
###
# Not needed or wanted
@@ -21,6 +21,8 @@ ignore = [
"ERA001", # eradicate: commented-out-code
# contextlib.suppress is roughly 3x slower than try/except
"SIM105", # flake8-simplify: use-contextlib-suppress
+ # Negative performance impact
+ "UP038", # non-pep604-isinstance
# Checked by type-checker (pyright)
"ANN", # flake-annotations
"PGH003", # blanket-type-ignore
@@ -34,7 +36,7 @@ ignore = [
"TD001", # flake8-todos: invalid-todo-tag
###
- # These should be warnings (https://github.com/charliermarsh/ruff/issues/1256)
+ # These should be warnings (https://github.com/astral-sh/ruff/issues/1256 & https://github.com/astral-sh/ruff/issues/1774)
###
"FIX", # flake8-fixme
# Not all TODOs are worth an issue, this would be better as a warning
@@ -56,7 +58,7 @@ ignore = [
# Python 3.11, introduced "zero cost" exception handling
"PERF203", # try-except-in-loop
- ### FIXME/TODO (no warnings in Ruff yet: https://github.com/charliermarsh/ruff/issues/1256):
+ ### FIXME/TODO (no warnings in Ruff yet: https://github.com/astral-sh/ruff/issues/1256 & https://github.com/astral-sh/ruff/issues/1774):
"CPY001", # flake8-copyright
"PTH", # flake8-use-pathlib
# Ignore until linux support
@@ -66,7 +68,7 @@ ignore = [
[tool.ruff.per-file-ignores]
"typings/**/*.pyi" = [
"F811", # Re-exports false positives
- "F821", # https://github.com/charliermarsh/ruff/issues/3011
+ "F821", # https://github.com/astral-sh/ruff/issues/3011
# The following can't be controlled for external libraries:
"A", # Shadowing builtin names
"ICN001", # unconventional-import-alias
@@ -77,18 +79,16 @@ ignore = [
"PYI042", # CamelCase TypeAlias
]
-# https://beta.ruff.rs/docs/settings/#flake8-implicit-str-concat
+# https://docs.astral.sh/ruff/settings/#flake8-implicit-str-concat
[tool.ruff.flake8-implicit-str-concat]
allow-multiline = false
-# https://beta.ruff.rs/docs/settings/#isort
+# https://docs.astral.sh/ruff/settings/#isort
[tool.ruff.isort]
combine-as-imports = true
split-on-trailing-comma = false
-required-imports = ["from __future__ import annotations"]
# Unlike isort, Ruff only counts relative imports as local-folder by default for know.
-# https://github.com/charliermarsh/ruff/issues/2419
-# https://github.com/charliermarsh/ruff/issues/3115
+# https://github.com/astral-sh/ruff/issues/3115
known-local-folder = [
"AutoControlledThread",
"AutoSplit",
@@ -105,7 +105,7 @@ known-local-folder = [
"utils",
]
-# https://beta.ruff.rs/docs/settings/#mccabe
+# https://docs.astral.sh/ruff/settings/#mccabe
[tool.ruff.mccabe]
# Hard limit, arbitrary to 4 bytes
max-complexity = 31
@@ -128,7 +128,10 @@ ignore = [
"E124", # Closing bracket may not match multi-line method invocation style (enforced by add-trailing-comma)
"E70", # Allow ... on same line as def
# Autofixed by Ruff
- # Check for the "Fix" flag https://beta.ruff.rs/docs/rules/#pycodestyle-e-w
+ # Check for the "Fix" flag https://docs.astral.sh/ruff/rules/#pycodestyle-e-w
+ "E20", # whitespace-after-* & whitespace-before-*
+ "E211", # whitespace-before-parameters
+ "E231", # missing-whitespace
"E401", # I001: unsorted-imports
"E71", # Comparisons
"E731", # lambda-assignment
@@ -139,6 +142,7 @@ ignore = [
# https://github.com/microsoft/pyright/blob/main/docs/configuration.md#sample-pyprojecttoml-file
[tool.pyright]
typeCheckingMode = "strict"
+pythonVersion = "3.10"
# Prefer `pyright: ignore`
enableTypeIgnoreComments = false
# Extra strict
diff --git a/res/about.ui b/res/about.ui
index b0e88fd9..861300cb 100644
--- a/res/about.ui
+++ b/res/about.ui
@@ -1,158 +1,158 @@
-
-
- Toufool
- AboutAutoSplitWidget
-
-
-
- 0
- 0
- 264
- 250
-
-
-
-
- 264
- 250
-
-
-
-
- 264
- 250
-
-
-
-
- 9
-
-
-
- About AutoSplit
-
-
-
- :/resources/icon.ico:/resources/icon.ico
-
-
-
-
- 180
- 220
- 75
- 24
-
-
-
- OK
-
-
-
-
-
- 10
- 44
- 241
- 32
-
-
-
- <html><head/><body><p>Created by <a href="https://twitter.com/toufool"><span style=" text-decoration: underline; color:#0000ff;">Toufool</span></a> and <a href="https://twitter.com/faschz"><span style=" text-decoration: underline; color:#0000ff;">Faschz</span></a><br/>Maintained by <a href="https://twitter.com/Avasam06"><span style=" text-decoration: underline; color:#0000ff;">Avasam</span></a></p></body></html>
-
-
-
-
-
- 10
- 21
- 241
- 16
-
-
-
- Version:
-
-
-
-
-
- 10
- 90
- 241
- 51
-
-
-
- If you enjoy using this program,
-please consider donating.
-Thank you!
-
-
- Qt::AlignCenter
-
-
-
-
-
- 60
- 150
- 147
- 51
-
-
-
- <html><head/><body><p><a href="https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=BYRHQG69YRHBA&item_name=AutoSplit+development&currency_code=USD&source=url"><img src=":/resources/btn_donateCC_LG.png"/></a></p></body></html>
-
-
- Qt::AlignCenter
-
-
-
-
-
- 190
- 17
- 64
- 64
-
-
-
-
-
-
- :/resources/icon.ico
-
-
- true
-
-
- icon_label
- donate_text_label
- version_label
- created_by_label
- ok_button
- donate_button_label
-
-
-
-
-
-
- ok_button
- clicked()
- AboutAutoSplitWidget
- close()
-
-
- 225
- 210
-
-
- 153
- 114
-
-
-
-
-
+
+
+ Toufool
+ AboutAutoSplitWidget
+
+
+
+ 0
+ 0
+ 264
+ 250
+
+
+
+
+ 264
+ 250
+
+
+
+
+ 264
+ 250
+
+
+
+
+ 9
+
+
+
+ About AutoSplit
+
+
+
+ :/resources/icon.ico:/resources/icon.ico
+
+
+
+
+ 180
+ 220
+ 75
+ 24
+
+
+
+ OK
+
+
+
+
+
+ 10
+ 44
+ 241
+ 32
+
+
+
+ <html><head/><body><p>Created by <a href="https://twitter.com/toufool"><span style=" text-decoration: underline; color:#0000ff;">Toufool</span></a> and <a href="https://twitter.com/faschz"><span style=" text-decoration: underline; color:#0000ff;">Faschz</span></a><br/>Maintained by <a href="https://twitter.com/Avasam06"><span style=" text-decoration: underline; color:#0000ff;">Avasam</span></a></p></body></html>
+
+
+
+
+
+ 10
+ 21
+ 241
+ 16
+
+
+
+ Version:
+
+
+
+
+
+ 10
+ 90
+ 241
+ 51
+
+
+
+ If you enjoy using this program,
+please consider donating.
+Thank you!
+
+
+ Qt::AlignCenter
+
+
+
+
+
+ 60
+ 150
+ 147
+ 51
+
+
+
+ <html><head/><body><p><a href="https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=BYRHQG69YRHBA&item_name=AutoSplit+development&currency_code=USD&source=url"><img src=":/resources/btn_donateCC_LG.png"/></a></p></body></html>
+
+
+ Qt::AlignCenter
+
+
+
+
+
+ 190
+ 17
+ 64
+ 64
+
+
+
+
+
+
+ :/resources/icon.ico
+
+
+ true
+
+
+ icon_label
+ donate_text_label
+ version_label
+ created_by_label
+ ok_button
+ donate_button_label
+
+
+
+
+
+
+ ok_button
+ clicked()
+ AboutAutoSplitWidget
+ close()
+
+
+ 225
+ 210
+
+
+ 153
+ 114
+
+
+
+
+
diff --git a/scripts/install.ps1 b/scripts/install.ps1
index 284633e1..eda5612f 100644
--- a/scripts/install.ps1
+++ b/scripts/install.ps1
@@ -6,12 +6,19 @@ pip install -r "$PSScriptRoot/requirements$dev.txt" --upgrade
# Patch libraries so we don't have to install from git
-# Prevent pyautogui from setting Process DPI Awareness, which Qt tries to do then throws warnings about it.
+# Prevent pyautogui and pywinctl's dependencies (pymonctl, pywinbox) from setting Process DPI Awareness, which Qt tries to do then throws warnings about it.
# The unittest workaround significantly increases build time, boot time and build size with PyInstaller.
# https://github.com/asweigart/pyautogui/issues/663#issuecomment-1296719464
-$pyautoguiPath = python -c 'import pyautogui as _; print(_.__path__[0])'
-(Get-Content "$pyautoguiPath/_pyautogui_win.py").replace('ctypes.windll.user32.SetProcessDPIAware()', 'pass') |
- Set-Content "$pyautoguiPath/_pyautogui_win.py"
+$libPath = python -c 'import pyautogui as _; print(_.__path__[0])'
+(Get-Content "$libPath/_pyautogui_win.py").replace('ctypes.windll.user32.SetProcessDPIAware()', 'pass') |
+ Set-Content "$libPath/_pyautogui_win.py"
+$libPath = python -c 'import pymonctl as _; print(_.__path__[0])'
+(Get-Content "$libPath/_pymonctl_win.py").replace('ctypes.windll.shcore.SetProcessDpiAwareness(2)', 'pass') |
+ Set-Content "$libPath/_pymonctl_win.py"
+$libPath = python -c 'import pywinbox as _; print(_.__path__[0])'
+(Get-Content "$libPath/_pywinbox_win.py").replace('ctypes.windll.shcore.SetProcessDpiAwareness(2)', 'pass') |
+ Set-Content "$libPath/_pywinbox_win.py"
+# Uninstall optional dependencies that PyInstaller picks up
python -m pip uninstall pyscreeze mouseinfo pyperclip -y
diff --git a/scripts/requirements-dev.txt b/scripts/requirements-dev.txt
index aa81f8fc..ffdc1ed1 100644
--- a/scripts/requirements-dev.txt
+++ b/scripts/requirements-dev.txt
@@ -14,7 +14,7 @@
# Linters & Formatters
add-trailing-comma>=3.1.0 # Must match .pre-commit-config.yaml
autopep8>=2.0.4 # Must match .pre-commit-config.yaml
-ruff>=0.0.292 # Ignore @override for bad-dunder-name # Must match .pre-commit-config.yaml
+ruff>=0.1.1 # New checks # Must match .pre-commit-config.yaml
#
# Types
types-D3DShot ; sys_platform == 'win32'
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
index 0c3cf5ed..6edb993b 100644
--- a/scripts/requirements.txt
+++ b/scripts/requirements.txt
@@ -28,7 +28,7 @@ pyinstaller-hooks-contrib>=2022.15 # charset-normalizer fix https://github.com/
# https://peps.python.org/pep-0508/#environment-markers
#
# Windows-only dependencies:
-git+https://github.com/andreaschiavinato/python_grabber.git#egg=pygrabber ; sys_platform == 'win32' # Completed types
+pygrabber>=0.2 ; sys_platform == 'win32' # Completed types
pywin32>=301 ; sys_platform == 'win32'
winsdk>=1.0.0b10 ; sys_platform == 'win32' # Python 3.12 support
git+https://github.com/ranchen421/D3DShot.git#egg=D3DShot ; sys_platform == 'win32' # D3DShot from PyPI with Pillow>=7.2.0 will install 0.1.3 instead of 0.1.5
diff --git a/src/AutoControlledThread.py b/src/AutoControlledThread.py
index f5e518a8..45aa453f 100644
--- a/src/AutoControlledThread.py
+++ b/src/AutoControlledThread.py
@@ -1,46 +1,46 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from PySide6 import QtCore
-
-import error_messages
-import user_profile
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-
-class AutoControlledThread(QtCore.QThread):
- def __init__(self, autosplit: AutoSplit):
- self.autosplit = autosplit
- super().__init__()
-
- @QtCore.Slot()
- def run(self):
- while True:
- try:
- line = input()
- except RuntimeError:
- self.autosplit.show_error_signal.emit(error_messages.stdin_lost)
- break
- except EOFError:
- continue
- # This is for use in a Development environment
- if line == "kill":
- self.autosplit.closeEvent()
- break
- if line == "start":
- self.autosplit.start_auto_splitter()
- elif line in {"split", "skip"}:
- self.autosplit.skip_split_signal.emit()
- elif line == "undo":
- self.autosplit.undo_split_signal.emit()
- elif line == "reset":
- self.autosplit.reset_signal.emit()
- elif line.startswith("settings"):
- # Allow for any split character between "settings" and the path
- user_profile.load_settings(self.autosplit, line[9:])
- # TODO: Not yet implemented in AutoSplit Integration
- # elif line == 'pause':
- # self.pause_signal.emit()
+from typing import TYPE_CHECKING
+
+from PySide6 import QtCore
+
+import error_messages
+import user_profile
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+
+class AutoControlledThread(QtCore.QThread):
+ def __init__(self, autosplit: "AutoSplit"):
+ self.autosplit = autosplit
+ super().__init__()
+
+ @QtCore.Slot()
+ def run(self):
+ while True:
+ try:
+ line = input()
+ except RuntimeError:
+ self.autosplit.show_error_signal.emit(error_messages.stdin_lost)
+ break
+ except EOFError:
+ continue
+ match line:
+ # This is for use in a Development environment
+ case "kill":
+ self.autosplit.closeEvent()
+ break
+ case "start":
+ self.autosplit.start_auto_splitter()
+ case "split" | "skip":
+ self.autosplit.skip_split_signal.emit()
+ case "undo":
+ self.autosplit.undo_split_signal.emit()
+ case "reset":
+ self.autosplit.reset_signal.emit()
+ # TODO: Not yet implemented in AutoSplit Integration
+ # case 'pause':
+ # self.pause_signal.emit()
+ case line:
+ if line.startswith("settings"):
+ # Allow for any split character between "settings" and the path
+ user_profile.load_settings(self.autosplit, line[9:])
diff --git a/src/AutoSplit.py b/src/AutoSplit.py
index 25261195..c5918ba9 100644
--- a/src/AutoSplit.py
+++ b/src/AutoSplit.py
@@ -1,987 +1,989 @@
-#!/usr/bin/python3
-from __future__ import annotations
-
-import ctypes
-import os
-import signal
-import sys
-from collections.abc import Callable
-from time import time
-from types import FunctionType
-from typing import NoReturn
-
-import certifi
-import cv2
-from cv2.typing import MatLike
-from psutil import process_iter
-from PySide6 import QtCore, QtGui
-from PySide6.QtTest import QTest
-from PySide6.QtWidgets import QApplication, QFileDialog, QLabel, QMainWindow, QMessageBox
-from typing_extensions import override
-
-import error_messages
-import user_profile
-from AutoControlledThread import AutoControlledThread
-from AutoSplitImage import START_KEYWORD, AutoSplitImage, ImageType
-from capture_method import CaptureMethodBase, CaptureMethodEnum
-from gen import about, design, settings, update_checker
-from hotkeys import HOTKEYS, after_setting_hotkey, send_command
-from menu_bar import (
- about_qt,
- about_qt_for_python,
- check_for_updates,
- get_default_settings_from_ui,
- open_about,
- open_settings,
- open_update_checker,
- view_help,
-)
-from region_selection import align_region, select_region, select_window, validate_before_parsing
-from split_parser import BELOW_FLAG, DUMMY_FLAG, PAUSE_FLAG, parse_and_validate_images
-from user_profile import DEFAULT_PROFILE
-from utils import (
- AUTOSPLIT_VERSION,
- BGRA_CHANNEL_COUNT,
- FROZEN,
- auto_split_directory,
- decimal,
- flatten,
- is_valid_image,
- open_file,
-)
-
-CHECK_FPS_ITERATIONS = 10
-DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = 2
-
-# Needed when compiled, along with the custom hook-requests PyInstaller hook
-os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
-myappid = f"Toufool.AutoSplit.v{AUTOSPLIT_VERSION}"
-ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
-
-
-class AutoSplit(QMainWindow, design.Ui_MainWindow):
- # Parse command line args
- is_auto_controlled = "--auto-controlled" in sys.argv
-
- # Signals
- start_auto_splitter_signal = QtCore.Signal()
- reset_signal = QtCore.Signal()
- skip_split_signal = QtCore.Signal()
- undo_split_signal = QtCore.Signal()
- pause_signal = QtCore.Signal()
- screenshot_signal = QtCore.Signal()
- after_setting_hotkey_signal = QtCore.Signal()
- update_checker_widget_signal = QtCore.Signal(str, bool)
- load_start_image_signal = QtCore.Signal(bool, bool)
- # Use this signal when trying to show an error from outside the main thread
- show_error_signal = QtCore.Signal(FunctionType)
-
- # Timers
- timer_live_image = QtCore.QTimer()
- timer_start_image = QtCore.QTimer()
-
- # Widgets
- AboutWidget: about.Ui_AboutAutoSplitWidget | None = None
- UpdateCheckerWidget: update_checker.Ui_UpdateChecker | None = None
- CheckForUpdatesThread: QtCore.QThread | None = None
- SettingsWidget: settings.Ui_SettingsWidget | None = None
-
- def __init__(self): # noqa: PLR0915
- super().__init__()
-
- # Initialize a few attributes
- self.hwnd = 0
- """Window Handle used for Capture Region"""
- self.last_saved_settings = DEFAULT_PROFILE
- self.similarity = 0.0
- self.split_image_number = 0
- self.split_images_and_loop_number: list[tuple[AutoSplitImage, int]] = []
- self.split_groups: list[list[int]] = []
- self.capture_method = CaptureMethodBase(None)
- self.is_running = False
-
- # Last loaded settings empty and last successful loaded settings file path to None until we try to load them
- self.last_loaded_settings = DEFAULT_PROFILE
- self.last_successfully_loaded_settings_file_path: str | None = None
- """For when a file has never loaded, but you successfully "Save File As"."""
-
- # Automatic timer start
- self.highest_similarity = 0.0
- self.reset_highest_similarity = 0.0
-
- # Ensure all other attributes are defined
- self.waiting_for_split_delay = False
- self.split_below_threshold = False
- self.run_start_time = 0.0
- self.start_image: AutoSplitImage | None = None
- self.reset_image: AutoSplitImage | None = None
- self.split_images: list[AutoSplitImage] = []
- self.split_image: AutoSplitImage | None = None
- self.update_auto_control: AutoControlledThread | None = None
-
- # Setup global error handling
- def _show_error_signal_slot(error_message_box: Callable[..., object]):
- return error_message_box()
- self.show_error_signal.connect(_show_error_signal_slot)
- sys.excepthook = error_messages.make_excepthook(self)
-
- self.setupUi(self)
- self.setWindowTitle(
- f"AutoSplit v{AUTOSPLIT_VERSION}" +
- (" (externally controlled)" if self.is_auto_controlled else ""),
- )
-
- # Hotkeys need to be initialized to be passed as thread arguments in hotkeys.py
- for hotkey in HOTKEYS:
- setattr(self, f"{hotkey}_hotkey", None)
-
- # Get default values defined in SettingsDialog
- self.settings_dict = get_default_settings_from_ui(self)
- user_profile.load_check_for_updates_on_open(self)
-
- if self.is_auto_controlled:
- self.start_auto_splitter_button.setEnabled(False)
-
- # Send version and process ID to stdout
- # THIS HAS TO BE THE FIRST TWO LINES SENT
- print(f"{AUTOSPLIT_VERSION}\n{os.getpid()}", flush=True)
-
- # Use and Start the thread that checks for updates from LiveSplit
- self.update_auto_control = AutoControlledThread(self)
- self.update_auto_control.start()
-
- # split image folder line edit text
- self.split_image_folder_input.setText("No Folder Selected")
-
- # Connecting menu actions
- self.action_view_help.triggered.connect(view_help)
- self.action_about.triggered.connect(lambda: open_about(self))
- self.action_about_qt.triggered.connect(about_qt)
- self.action_about_qt_for_python.triggered.connect(about_qt_for_python)
- self.action_check_for_updates.triggered.connect(lambda: check_for_updates(self))
- self.action_settings.triggered.connect(lambda: open_settings(self))
- self.action_save_profile.triggered.connect(lambda: user_profile.save_settings(self))
- self.action_save_profile_as.triggered.connect(lambda: user_profile.save_settings_as(self))
- self.action_load_profile.triggered.connect(lambda: user_profile.load_settings(self))
-
- # Connecting button clicks to functions
- self.split_image_folder_button.clicked.connect(self.__browse)
- self.select_region_button.clicked.connect(lambda: select_region(self))
- self.take_screenshot_button.clicked.connect(self.__take_screenshot)
- self.start_auto_splitter_button.clicked.connect(self.__auto_splitter)
- self.check_fps_button.clicked.connect(self.__check_fps)
- self.reset_button.clicked.connect(self.reset)
- self.skip_split_button.clicked.connect(self.skip_split)
- self.undo_split_button.clicked.connect(self.undo_split)
- self.next_image_button.clicked.connect(lambda: self.skip_split(True))
- self.previous_image_button.clicked.connect(lambda: self.undo_split(True))
- self.align_region_button.clicked.connect(lambda: align_region(self))
- self.select_window_button.clicked.connect(lambda: select_window(self))
- self.reload_start_image_button.clicked.connect(lambda: self.__load_start_image(True, True))
- self.action_check_for_updates_on_open.changed.connect(
- lambda: user_profile.set_check_for_updates_on_open(self, self.action_check_for_updates_on_open.isChecked()),
- )
-
- # update x, y, width, and height when changing the value of these spinbox's are changed
- self.x_spinbox.valueChanged.connect(self.__update_x)
- self.y_spinbox.valueChanged.connect(self.__update_y)
- self.width_spinbox.valueChanged.connect(self.__update_width)
- self.height_spinbox.valueChanged.connect(self.__update_height)
-
- # connect signals to functions
- self.after_setting_hotkey_signal.connect(lambda: after_setting_hotkey(self))
- self.start_auto_splitter_signal.connect(self.__auto_splitter)
-
- def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool):
- return open_update_checker(self, latest_version, check_on_open)
- self.update_checker_widget_signal.connect(_update_checker_widget_signal_slot)
-
- self.load_start_image_signal.connect(self.__load_start_image)
- self.reset_signal.connect(self.reset)
- self.skip_split_signal.connect(self.skip_split)
- self.undo_split_signal.connect(self.undo_split)
- self.pause_signal.connect(self.pause)
- self.screenshot_signal.connect(self.__take_screenshot)
-
- # live image checkbox
- self.timer_live_image.timeout.connect(lambda: self.__update_live_image_details(None, True))
- self.timer_live_image.start(int(1000 / self.settings_dict["fps_limit"]))
-
- # Automatic timer start
- self.timer_start_image.timeout.connect(self.__start_image_function)
-
- self.show()
-
- try:
- import pyi_splash # pyright: ignore[reportMissingModuleSource]
- pyi_splash.close()
- except ModuleNotFoundError:
- pass
-
- # Needs to be after Ui_MainWindow.show() to be shown on top
- if not self.is_auto_controlled:
- # Must also be done later to help load the saved capture window
- user_profile.load_settings_on_open(self)
- if self.action_check_for_updates_on_open.isChecked():
- check_for_updates(self, check_on_open=True)
-
- # FUNCTIONS
-
- def __browse(self):
- # User selects the file with the split images in it.
- new_split_image_directory = QFileDialog.getExistingDirectory(
- self,
- "Select Split Image Directory",
- os.path.join(self.settings_dict["split_image_directory"] or auto_split_directory, ".."),
- )
-
- # If the user doesn't select a folder, it defaults to "".
- if new_split_image_directory:
- # set the split image folder line to the directory text
- self.settings_dict["split_image_directory"] = new_split_image_directory
- self.split_image_folder_input.setText(f"{new_split_image_directory}/")
- self.load_start_image_signal.emit(False, True)
-
- def __update_live_image_details(self, capture: MatLike | None, called_from_timer: bool = False):
- # HACK: Since this is also called in __get_capture_for_comparison,
- # we don't need to update anything if the app is running
- if called_from_timer:
- if self.is_running or self.start_image:
- return
- capture, _ = self.capture_method.get_frame(self)
-
- # Update title from target window or Capture Device name
- capture_region_window_label = self.settings_dict["capture_device_name"] \
- if self.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE \
- else self.settings_dict["captured_window_title"]
- self.capture_region_window_label.setText(capture_region_window_label)
-
- # Simply clear if "live capture region" setting is off
- if not (self.settings_dict["live_capture_region"] and capture_region_window_label):
- self.live_image.clear()
- # Set live image in UI
- else:
- set_preview_image(self.live_image, capture)
-
- def __load_start_image(self, started_by_button: bool = False, wait_for_delay: bool = True):
- """Not thread safe (if triggered by LiveSplit for example). Use `load_start_image_signal.emit` instead."""
- self.timer_start_image.stop()
- self.current_image_file_label.setText("-")
- self.start_image_status_value_label.setText("not found")
- set_preview_image(self.current_split_image, None)
-
- if not (validate_before_parsing(self, started_by_button) and parse_and_validate_images(self)):
- QApplication.processEvents()
- return
-
- if not self.start_image:
- if started_by_button:
- error_messages.no_keyword_image(START_KEYWORD)
- QApplication.processEvents()
- return
-
- self.split_image_number = 0
-
- if not wait_for_delay and self.start_image.get_pause_time(self) > 0:
- self.start_image_status_value_label.setText("paused")
- self.table_current_image_highest_label.setText("-")
- self.table_current_image_threshold_label.setText("-")
- else:
- self.start_image_status_value_label.setText("ready")
- self.__update_split_image(self.start_image)
-
- self.highest_similarity = 0.0
- self.reset_highest_similarity = 0.0
- self.split_below_threshold = False
- self.timer_start_image.start(int(1000 / self.settings_dict["fps_limit"]))
-
- QApplication.processEvents()
-
- def __start_image_function(self):
- if not self.start_image:
- return
-
- self.start_image_status_value_label.setText("ready")
- self.__update_split_image(self.start_image)
-
- capture, _ = self.__get_capture_for_comparison()
- start_image_threshold = self.start_image.get_similarity_threshold(self)
- start_image_similarity = self.start_image.compare_with_capture(self, capture)
-
- # If the similarity becomes higher than highest similarity, set it as such.
- if start_image_similarity > self.highest_similarity:
- self.highest_similarity = start_image_similarity
-
- self.table_current_image_live_label.setText(decimal(start_image_similarity))
- self.table_current_image_highest_label.setText(decimal(self.highest_similarity))
- self.table_current_image_threshold_label.setText(decimal(start_image_threshold))
-
- # If the {b} flag is set, let similarity go above threshold first, then split on similarity below threshold
- # Otherwise just split when similarity goes above threshold
- # TODO: Abstract with similar check in split image
- below_flag = self.start_image.check_flag(BELOW_FLAG)
-
- # Negative means belove threshold, positive means above
- similarity_diff = start_image_similarity - start_image_threshold
- if below_flag and not self.split_below_threshold and similarity_diff >= 0:
- self.split_below_threshold = True
- return
- if (
- (below_flag and self.split_below_threshold and similarity_diff < 0 and is_valid_image(capture))
- or (not below_flag and similarity_diff >= 0)
- ):
-
- self.timer_start_image.stop()
- self.split_below_threshold = False
-
- if not self.start_image.check_flag(DUMMY_FLAG):
- # Delay Start Image if needed
- if self.start_image.get_delay_time(self) > 0:
- self.start_image_status_value_label.setText("delaying start...")
- delay_start_time = time()
- start_delay = self.start_image.get_delay_time(self) / 1000
- time_delta = 0.0
- while time_delta < start_delay:
- delay_time_left = start_delay - time_delta
- self.current_split_image.setText(
- f"Delayed Before Starting:\n {seconds_remaining_text(delay_time_left)}",
- )
- # Wait 0.1s. Doesn't need to be shorter as we only show 1 decimal
- QTest.qWait(100)
- time_delta = time() - delay_start_time
- send_command(self, "start")
-
- self.start_image_status_value_label.setText("started")
- self.start_auto_splitter()
-
- # update x, y, width, height when spinbox values are changed
- def __update_x(self):
- self.settings_dict["capture_region"]["x"] = self.x_spinbox.value()
-
- def __update_y(self):
- self.settings_dict["capture_region"]["y"] = self.y_spinbox.value()
-
- def __update_width(self):
- self.settings_dict["capture_region"]["width"] = self.width_spinbox.value()
-
- def __update_height(self):
- self.settings_dict["capture_region"]["height"] = self.height_spinbox.value()
-
- def __take_screenshot(self):
- if not validate_before_parsing(self, check_empty_directory=False):
- return
-
- # Check if file exists and rename it if it does.
- # Below starts the file_name_number at #001 up to #999. After that it will go to 1000,
- # which is a problem, but I doubt anyone will get to 1000 split images...
- screenshot_index = 1
- while True:
- screenshot_path = os.path.join(
- self.settings_dict["screenshot_directory"] or self.settings_dict["split_image_directory"],
- f"{screenshot_index:03}_SplitImage.png",
- )
- if not os.path.exists(screenshot_path):
- break
- screenshot_index += 1
-
- # Grab screenshot of capture region
- capture, _ = self.capture_method.get_frame(self)
- if not is_valid_image(capture):
- error_messages.region()
- return
-
- # Save and open image
- cv2.imwrite(screenshot_path, capture)
- if self.settings_dict["open_screenshot"]:
- open_file(screenshot_path)
-
- def __check_fps(self):
- self.fps_value_label.setText("...")
- QApplication.processEvents()
- if not (validate_before_parsing(self) and parse_and_validate_images(self)):
- self.fps_value_label.clear()
- return
-
- images = self.split_images
- if self.start_image:
- images.append(self.start_image)
- if self.reset_image:
- images.append(self.reset_image)
-
- # run X iterations of screenshotting capture region + comparison + displaying.
- t0 = time()
- for image in images:
- count = 0
- while count < CHECK_FPS_ITERATIONS:
- capture, is_old_image = self.__get_capture_for_comparison()
- _ = image.compare_with_capture(self, capture)
- # TODO: If is_old_image=true is always returned, this becomes an infinite loop
- if not is_old_image:
- count += 1
-
- # calculate FPS
- t1 = time()
- fps = int((CHECK_FPS_ITERATIONS * len(images)) / (t1 - t0))
- self.fps_value_label.setText(str(fps))
-
- def __is_current_split_out_of_range(self):
- return self.split_image_number < 0 \
- or self.split_image_number > len(self.split_images_and_loop_number) - 1
-
- def undo_split(self, navigate_image_only: bool = False):
- """Undo Split" and "Prev. Img." buttons connect to here."""
- # Can't undo until timer is started
- # or Undoing past the first image
- if (
- not self.is_running
- or "Delayed Split" in self.current_split_image.text()
- or (not self.undo_split_button.isEnabled() and not self.is_auto_controlled)
- or self.__is_current_split_out_of_range()
- ):
- return
-
- if not navigate_image_only:
- for i, group in enumerate(self.split_groups):
- if i > 0 and self.split_image_number in group:
- self.split_image_number = self.split_groups[i - 1][-1]
- break
- else:
- self.split_image_number -= 1
-
- self.__update_split_image()
- if not navigate_image_only:
- send_command(self, "undo")
-
- def skip_split(self, navigate_image_only: bool = False):
- """Skip Split" and "Next Img." buttons connect to here."""
- # Can't skip or split until timer is started
- # or Splitting/skipping when there are no images left
- if not self.is_running \
- or "Delayed Split" in self.current_split_image.text() \
- or not (self.skip_split_button.isEnabled() or self.is_auto_controlled or navigate_image_only) \
- or self.__is_current_split_out_of_range():
- return
-
- if not navigate_image_only:
- for group in self.split_groups:
- if self.split_image_number in group:
- self.split_image_number = group[-1] + 1
- break
- else:
- self.split_image_number += 1
-
- self.__update_split_image()
- if not navigate_image_only:
- send_command(self, "skip")
-
- def pause(self):
- # TODO: add what to do when you hit pause hotkey, if this even needs to be done
- pass
-
- def reset(self):
- """
- When the reset button or hotkey is pressed, it will set `is_running` to False,
- which will trigger in the __auto_splitter function, if running, to abort and change GUI.
- """
- self.is_running = False
-
- # Functions for the hotkeys to return to the main thread from signals and start their corresponding functions
- def start_auto_splitter(self):
- # If the auto splitter is already running or the button is disabled, don't emit the signal to start it.
- if self.is_running \
- or (not self.start_auto_splitter_button.isEnabled() and not self.is_auto_controlled):
- return
-
- start_label: str = self.start_image_status_value_label.text()
- if start_label.endswith(("ready", "paused")):
- self.start_image_status_value_label.setText("not ready")
-
- self.start_auto_splitter_signal.emit()
-
- def __check_for_reset_state_update_ui(self):
- """Check if AutoSplit is started, if not then update the GUI."""
- if not self.is_running:
- self.gui_changes_on_reset(True)
- return True
- return False
-
- def __auto_splitter(self): # noqa: PLR0912,PLR0915
- if not self.settings_dict["split_hotkey"] and not self.is_auto_controlled:
- self.gui_changes_on_reset(True)
- error_messages.split_hotkey()
- return
-
- # Set start time before parsing the images as it's a heavy operation that will cause delays
- self.run_start_time = time()
-
- if not (validate_before_parsing(self) and parse_and_validate_images(self)):
- # `safe_to_reload_start_image: bool = False` becasue __load_start_image also does this check,
- # we don't want to double a Start/Reset Image error message
- self.gui_changes_on_reset(False)
- return
-
- # Construct a list of images + loop count tuples.
- self.split_images_and_loop_number = list(
- flatten(
- ((split_image, i + 1) for i in range(split_image.loops))
- for split_image
- in self.split_images
- ),
- )
-
- # Construct groups of splits
- self.split_groups = []
- dummy_splits_array = []
- number_of_split_images = len(self.split_images_and_loop_number)
- current_group: list[int] = []
- self.split_groups.append(current_group)
- for i, image in enumerate(self.split_images_and_loop_number):
- current_group.append(i)
- dummy = image[0].check_flag(DUMMY_FLAG)
- dummy_splits_array.append(dummy)
- if not dummy and i < number_of_split_images - 1:
- current_group = []
- self.split_groups.append(current_group)
-
- self.is_running = True
- self.gui_changes_on_start()
-
- # Start pause time
- if self.start_image:
- self.__pause_loop(self.start_image.get_pause_time(self), "None (Paused).")
-
- # Initialize a few attributes
- self.split_image_number = 0
- self.waiting_for_split_delay = False
- self.split_below_threshold = False
- split_time = 0
-
- # First loop: stays in this loop until all of the split images have been split
- while self.split_image_number < number_of_split_images:
-
- # Check if we are not waiting for the split delay to send the key press
- if self.waiting_for_split_delay:
- time_millis = int(round(time() * 1000))
- if time_millis < split_time:
- QApplication.processEvents()
- continue
-
- self.__update_split_image()
-
- # Type checking
- if not self.split_image:
- return
-
- # Second loop: stays in this loop until similarity threshold is met
- if self.__similarity_threshold_loop(number_of_split_images, dummy_splits_array):
- return
-
- # We need to make sure that this isn't a dummy split before sending the key press.
- if not self.split_image.check_flag(DUMMY_FLAG):
- # If it's a delayed split, check if the delay has passed
- # Otherwise calculate the split time for the key press
- split_delay = self.split_image.get_delay_time(self) / 1000
- if split_delay > 0 and not self.waiting_for_split_delay:
- split_time = round(time() + split_delay * 1000)
- self.waiting_for_split_delay = True
- buttons_to_disable = [
- self.next_image_button,
- self.previous_image_button,
- self.undo_split_button,
- self.skip_split_button,
- ]
- for button in buttons_to_disable:
- button.setEnabled(False)
- self.current_image_file_label.clear()
-
- # check for reset while delayed and display a counter of the remaining split delay time
- if self.__pause_loop(split_delay, "Delayed Split:"):
- return
-
- for button in buttons_to_disable:
- button.setEnabled(True)
-
- self.waiting_for_split_delay = False
-
- # if {p} flag hit pause key, otherwise hit split hotkey
- send_command(self, "pause" if self.split_image.check_flag(PAUSE_FLAG) else "split")
-
- # if loop check box is checked and its the last split, go to first split.
- # else go to the next split image.
- if self.settings_dict["loop_splits"] and self.split_image_number == number_of_split_images - 1:
- self.split_image_number = 0
- else:
- self.split_image_number += 1
-
- # If its not the last split image, pause for the amount set by the user
- # A pause loop to check if the user presses skip split, undo split, or reset here.
- # Also updates the current split image text, counting down the time until the next split image
- if self.__pause_loop(self.split_image.get_pause_time(self), "None (Paused)."):
- return
-
- # loop breaks to here when the last image splits
- self.is_running = False
- self.gui_changes_on_reset(True)
-
- def __similarity_threshold_loop(self, number_of_split_images: int, dummy_splits_array: list[bool]):
- """
- Wait until the similarity threshold is met.
-
- Returns True if the loop was interrupted by a reset.
- """
- # Type checking
- if not self.split_image:
- return False
-
- start = time()
- while True:
- capture, _ = self.__get_capture_for_comparison()
-
- if self.__reset_if_should(capture):
- return True
-
- similarity = self.split_image.compare_with_capture(self, capture)
-
- # Show live similarity
- self.table_current_image_live_label.setText(decimal(similarity))
-
- # if the similarity becomes higher than highest similarity, set it as such.
- if similarity > self.highest_similarity:
- self.highest_similarity = similarity
-
- # show live highest similarity if the checkbox is checked
- self.table_current_image_highest_label.setText(decimal(self.highest_similarity))
-
- # If its the last split image and last loop number, disable the next image button
- # If its the first split image, disable the undo split and previous image buttons
- self.next_image_button.setEnabled(self.split_image_number != number_of_split_images - 1)
- self.previous_image_button.setEnabled(self.split_image_number != 0)
- if not self.is_auto_controlled:
- # If its the last non-dummy split image and last loop number, disable the skip split button
- self.skip_split_button.setEnabled(dummy_splits_array[self.split_image_number:].count(False) > 1)
- self.undo_split_button.setEnabled(self.split_image_number != 0)
- QApplication.processEvents()
-
- # Limit the number of time the comparison runs to reduce cpu usage
- frame_interval = 1 / self.settings_dict["fps_limit"]
- # Use a time delta to have a consistant check interval
- wait_delta_ms = int((frame_interval - (time() - start) % frame_interval) * 1000)
-
- below_flag = self.split_image.check_flag(BELOW_FLAG)
- # if the b flag is set, let similarity go above threshold first,
- # then split on similarity below threshold.
- # if no b flag, just split when similarity goes above threshold.
- # TODO: Abstract with similar check in Start Image
- if not self.waiting_for_split_delay:
- if similarity >= self.split_image.get_similarity_threshold(self):
- if not below_flag:
- break
- if not self.split_below_threshold:
- self.split_below_threshold = True
- QTest.qWait(wait_delta_ms)
- continue
-
- elif below_flag and self.split_below_threshold and is_valid_image(capture):
- self.split_below_threshold = False
- break
-
- QTest.qWait(wait_delta_ms)
-
- return False
-
- def __pause_loop(self, stop_time: float, message: str):
- """
- Wait for a certain time and show the timer to the user.
- Can be stopped early if the current split goes past the one when the loop started.
-
- Returns True if the loop was interrupted by a reset.
- """
- if stop_time <= 0:
- return False
- start_time = time()
- # Set a "pause" split image number.
- # This is done so that it can detect if user hit split/undo split while paused/delayed.
- pause_split_image_number = self.split_image_number
- while True:
- # Calculate similarity for Reset Image
- if self.__reset_if_should(self.__get_capture_for_comparison()[0]):
- return True
-
- time_delta = time() - start_time
- if (
- # Check for end of the pause/delay
- time_delta >= stop_time
- # Check for skip split / next image:
- or self.split_image_number > pause_split_image_number
- # Check for undo split / previous image:
- or self.split_image_number < pause_split_image_number
- ):
- break
-
- self.current_split_image.setText(f"{message} {seconds_remaining_text(stop_time - time_delta)}")
-
- QTest.qWait(1)
- return False
-
- def gui_changes_on_start(self):
- self.timer_start_image.stop()
- self.start_auto_splitter_button.setText("Running...")
- self.split_image_folder_button.setEnabled(False)
- self.reload_start_image_button.setEnabled(False)
- self.previous_image_button.setEnabled(True)
- self.next_image_button.setEnabled(True)
-
- # TODO: Do we actually need to disable setting new hotkeys once started?
- # What does this achieve? (See below TODO)
- if self.SettingsWidget:
- for hotkey in HOTKEYS:
- getattr(self.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False)
-
- if not self.is_auto_controlled:
- self.start_auto_splitter_button.setEnabled(False)
- self.reset_button.setEnabled(True)
- self.undo_split_button.setEnabled(True)
- self.skip_split_button.setEnabled(True)
-
- QApplication.processEvents()
-
- def gui_changes_on_reset(self, safe_to_reload_start_image: bool = False):
- self.start_auto_splitter_button.setText("Start Auto Splitter")
- self.image_loop_value_label.setText("N/A")
- self.current_split_image.clear()
- self.current_image_file_label.clear()
- self.table_current_image_live_label.setText("-")
- self.table_current_image_highest_label.setText("-")
- self.table_current_image_threshold_label.setText("-")
- self.table_reset_image_live_label.setText("-")
- self.table_reset_image_highest_label.setText("-")
- self.table_reset_image_threshold_label.setText("-")
- self.split_image_folder_button.setEnabled(True)
- self.reload_start_image_button.setEnabled(True)
- self.previous_image_button.setEnabled(False)
- self.next_image_button.setEnabled(False)
-
- # TODO: Do we actually need to disable setting new hotkeys once started?
- # What does this achieve? (see above TODO)
- if self.SettingsWidget and not self.is_auto_controlled:
- for hotkey in HOTKEYS:
- getattr(self.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True)
-
- if not self.is_auto_controlled:
- self.start_auto_splitter_button.setEnabled(True)
- self.reset_button.setEnabled(False)
- self.undo_split_button.setEnabled(False)
- self.skip_split_button.setEnabled(False)
-
- QApplication.processEvents()
- if safe_to_reload_start_image:
- self.load_start_image_signal.emit(False, False)
-
- def __get_capture_for_comparison(self):
- """Grab capture region and resize for comparison."""
- capture, is_old_image = self.capture_method.get_frame(self)
-
- # This most likely means we lost capture
- # (ie the captured window was closed, crashed, lost capture device, etc.)
- if not is_valid_image(capture):
- # Try to recover by using the window name
- if self.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
- self.live_image.setText("Waiting for capture device...")
- else:
- self.live_image.setText("Trying to recover window...")
- recovered = self.capture_method.recover_window(self.settings_dict["captured_window_title"], self)
- if recovered:
- capture, _ = self.capture_method.get_frame(self)
-
- self.__update_live_image_details(capture)
- return capture, is_old_image
-
- def __reset_if_should(self, capture: MatLike | None):
- """Checks if we should reset, resets if it's the case, and returns the result."""
- if self.reset_image:
- if self.settings_dict["enable_auto_reset"]:
- similarity = self.reset_image.compare_with_capture(self, capture)
- threshold = self.reset_image.get_similarity_threshold(self)
-
- pause_times = [self.reset_image.get_pause_time(self)]
- if self.start_image:
- pause_times.append(self.start_image.get_pause_time(self))
- paused = time() - self.run_start_time <= max(pause_times)
- if paused:
- should_reset = False
- self.table_reset_image_live_label.setText("paused")
- else:
- should_reset = similarity >= threshold
- if similarity > self.reset_highest_similarity:
- self.reset_highest_similarity = similarity
- self.table_reset_image_highest_label.setText(decimal(self.reset_highest_similarity))
- self.table_reset_image_live_label.setText(decimal(similarity))
-
- self.table_reset_image_threshold_label.setText(decimal(threshold))
-
- if should_reset:
- send_command(self, "reset")
- self.reset()
- else:
- self.table_reset_image_live_label.setText("disabled")
- else:
- self.table_reset_image_live_label.setText("N/A")
- self.table_reset_image_threshold_label.setText("N/A")
- self.table_reset_image_highest_label.setText("N/A")
-
- return self.__check_for_reset_state_update_ui()
-
- def __update_split_image(self, specific_image: AutoSplitImage | None = None):
- # Start image is expected to be out of range (index 0 of 0-length array)
- if not specific_image or specific_image.image_type != ImageType.START:
- # need to reset highest_similarity and split_below_threshold each time an image updates.
- self.highest_similarity = 0.0
- self.split_below_threshold = False
- # Splitting/skipping when there are no images left or Undoing past the first image
- if self.__is_current_split_out_of_range():
- self.reset()
- return
-
- # Get split image
- self.split_image = specific_image or self.split_images_and_loop_number[0 + self.split_image_number][0]
- if is_valid_image(self.split_image.byte_array):
- set_preview_image(self.current_split_image, self.split_image.byte_array)
-
- self.current_image_file_label.setText(self.split_image.filename)
- self.table_current_image_threshold_label.setText(decimal(self.split_image.get_similarity_threshold(self)))
-
- # Set Image Loop number
- if specific_image and specific_image.image_type == ImageType.START:
- self.image_loop_value_label.setText("N/A")
- else:
- loop_tuple = self.split_images_and_loop_number[self.split_image_number]
- self.image_loop_value_label.setText(f"{loop_tuple[1]}/{loop_tuple[0].loops}")
-
- @override
- def closeEvent(self, event: QtGui.QCloseEvent | None = None):
- """Exit safely when closing the window."""
-
- def exit_program() -> NoReturn:
- if self.update_auto_control:
- # self.update_auto_control.terminate() hangs in PySide6
- self.update_auto_control.quit()
- self.capture_method.close(self)
- if event is not None:
- event.accept()
- if self.is_auto_controlled:
- # stop main thread (which is probably blocked reading input) via an interrupt signal
- os.kill(os.getpid(), signal.SIGINT)
- sys.exit()
-
- # Simulates LiveSplit quitting without asking. See "TODO" at update_auto_control Worker
- # This also more gracefully exits LiveSplit
- # Users can still manually save their settings
- if event is None:
- exit_program()
-
- if user_profile.have_settings_changed(self):
- # Give a different warning if there was never a settings file that was loaded successfully,
- # and "save as" instead of "save".
- settings_file_name = "Untitled" \
- if not self.last_successfully_loaded_settings_file_path \
- else os.path.basename(self.last_successfully_loaded_settings_file_path)
-
- warning = QMessageBox.warning(
- self,
- "AutoSplit",
- f"Do you want to save changes made to settings file {settings_file_name}?",
- QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No | QMessageBox.StandardButton.Cancel,
- )
-
- if warning is QMessageBox.StandardButton.Yes:
- if user_profile.save_settings(self):
- exit_program()
- else:
- event.ignore()
- if warning is QMessageBox.StandardButton.No:
- exit_program()
- if warning is QMessageBox.StandardButton.Cancel:
- event.ignore()
- else:
- exit_program()
-
-
-def set_preview_image(qlabel: QLabel, image: MatLike | None):
- if not is_valid_image(image):
- # Clear current pixmap if no image. But don't clear text
- if not qlabel.text():
- qlabel.clear()
- else:
- height, width, channels = image.shape
-
- if channels == BGRA_CHANNEL_COUNT:
- image_format = QtGui.QImage.Format.Format_RGBA8888
- capture = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
- else:
- image_format = QtGui.QImage.Format.Format_BGR888
- capture = image
-
- qimage = QtGui.QImage(
- capture.data, # pyright: ignore[reportGeneralTypeIssues] # https://bugreports.qt.io/browse/PYSIDE-2476
- width, height,
- width *
- channels, image_format,
- )
- qlabel.setPixmap(
- QtGui.QPixmap(qimage).scaled(
- qlabel.size(),
- QtCore.Qt.AspectRatioMode.IgnoreAspectRatio,
- QtCore.Qt.TransformationMode.SmoothTransformation,
- ),
- )
-
-
-def seconds_remaining_text(seconds: float):
- return f"{seconds:.1f} second{'' if 0 < seconds <= 1 else 's'} remaining"
-
-
-def is_already_open():
- # When running directly in Python, any AutoSplit process means it's already open
- # When bundled, we must ignore itself and the splash screen
- max_processes = 3 if FROZEN else 1
- process_count = 0
- for process in process_iter():
- if process.name() == "AutoSplit.exe":
- process_count += 1
- if process_count >= max_processes:
- return True
- return False
-
-
-def main():
- # Best to call setStyle before the QApplication constructor
- # https://doc.qt.io/qt-6/qapplication.html#setStyle-1
- QApplication.setStyle("fusion")
- # Call to QApplication outside the try-except so we can show error messages
- app = QApplication(sys.argv)
- try:
- app.setWindowIcon(QtGui.QIcon(":/resources/icon.ico"))
-
- if is_already_open():
- error_messages.already_open()
-
- AutoSplit()
-
- if not FROZEN:
- # Kickoff the event loop every so often so we can handle KeyboardInterrupt (^C)
- timer = QtCore.QTimer()
- timer.timeout.connect(lambda: None)
- timer.start(500)
-
- exit_code = app.exec()
- except Exception as exception: # noqa: BLE001 # We really want to catch everything here
- error_messages.handle_top_level_exceptions(exception)
-
- # Catch Keyboard Interrupts for a clean close
- signal.signal(signal.SIGINT, lambda code, _: sys.exit(code))
-
- sys.exit(exit_code)
-
-
-if __name__ == "__main__":
- main()
+#!/usr/bin/python3
+import ctypes
+import os
+import signal
+import sys
+from collections.abc import Callable
+from time import time
+from types import FunctionType
+from typing import NoReturn
+
+import certifi
+import cv2
+from cv2.typing import MatLike
+from psutil import process_iter
+from PySide6 import QtCore, QtGui
+from PySide6.QtTest import QTest
+from PySide6.QtWidgets import QApplication, QFileDialog, QLabel, QMainWindow, QMessageBox
+from typing_extensions import override
+
+import error_messages
+import user_profile
+from AutoControlledThread import AutoControlledThread
+from AutoSplitImage import START_KEYWORD, AutoSplitImage, ImageType
+from capture_method import CaptureMethodBase, CaptureMethodEnum
+from gen import about, design, settings, update_checker
+from hotkeys import HOTKEYS, after_setting_hotkey, send_command
+from menu_bar import (
+ about_qt,
+ about_qt_for_python,
+ check_for_updates,
+ get_default_settings_from_ui,
+ open_about,
+ open_settings,
+ open_update_checker,
+ view_help,
+)
+from region_selection import align_region, select_region, select_window, validate_before_parsing
+from split_parser import BELOW_FLAG, DUMMY_FLAG, PAUSE_FLAG, parse_and_validate_images
+from user_profile import DEFAULT_PROFILE
+from utils import (
+ AUTOSPLIT_VERSION,
+ BGRA_CHANNEL_COUNT,
+ FROZEN,
+ auto_split_directory,
+ decimal,
+ flatten,
+ is_valid_image,
+ open_file,
+)
+
+CHECK_FPS_ITERATIONS = 10
+DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = 2
+
+# Needed when compiled, along with the custom hook-requests PyInstaller hook
+os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
+myappid = f"Toufool.AutoSplit.v{AUTOSPLIT_VERSION}"
+ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
+
+
+class AutoSplit(QMainWindow, design.Ui_MainWindow):
+ # Parse command line args
+ is_auto_controlled = "--auto-controlled" in sys.argv
+
+ # Signals
+ start_auto_splitter_signal = QtCore.Signal()
+ reset_signal = QtCore.Signal()
+ skip_split_signal = QtCore.Signal()
+ undo_split_signal = QtCore.Signal()
+ pause_signal = QtCore.Signal()
+ screenshot_signal = QtCore.Signal()
+ after_setting_hotkey_signal = QtCore.Signal()
+ update_checker_widget_signal = QtCore.Signal(str, bool)
+ load_start_image_signal = QtCore.Signal(bool, bool)
+ # Use this signal when trying to show an error from outside the main thread
+ show_error_signal = QtCore.Signal(FunctionType)
+
+ # Timers
+ timer_live_image = QtCore.QTimer()
+ timer_start_image = QtCore.QTimer()
+
+ # Widgets
+ AboutWidget: about.Ui_AboutAutoSplitWidget | None = None
+ UpdateCheckerWidget: update_checker.Ui_UpdateChecker | None = None
+ CheckForUpdatesThread: QtCore.QThread | None = None
+ SettingsWidget: settings.Ui_SettingsWidget | None = None
+
+ def __init__(self): # noqa: PLR0915
+ super().__init__()
+
+ # Initialize a few attributes
+ self.hwnd = 0
+ """Window Handle used for Capture Region"""
+ self.last_saved_settings = DEFAULT_PROFILE
+ self.similarity = 0.0
+ self.split_image_number = 0
+ self.split_images_and_loop_number: list[tuple[AutoSplitImage, int]] = []
+ self.split_groups: list[list[int]] = []
+ self.capture_method = CaptureMethodBase(None)
+ self.is_running = False
+
+ # Last loaded settings empty and last successfully loaded settings file path to None until we try to load them
+ self.last_loaded_settings = DEFAULT_PROFILE
+ self.last_successfully_loaded_settings_file_path: str | None = None
+ """For when a file has never loaded, but you successfully "Save File As"."""
+
+ # Automatic timer start
+ self.highest_similarity = 0.0
+ self.reset_highest_similarity = 0.0
+
+ # Ensure all other attributes are defined
+ self.waiting_for_split_delay = False
+ self.split_below_threshold = False
+ self.run_start_time = 0.0
+ self.start_image: AutoSplitImage | None = None
+ self.reset_image: AutoSplitImage | None = None
+ self.split_images: list[AutoSplitImage] = []
+ self.split_image: AutoSplitImage | None = None
+ self.update_auto_control: AutoControlledThread | None = None
+
+ # Setup global error handling
+ def _show_error_signal_slot(error_message_box: Callable[..., object]):
+ return error_message_box()
+
+ self.show_error_signal.connect(_show_error_signal_slot)
+ sys.excepthook = error_messages.make_excepthook(self)
+
+ self.setupUi(self)
+ self.setWindowTitle(
+ f"AutoSplit v{AUTOSPLIT_VERSION}" +
+ (" (externally controlled)" if self.is_auto_controlled else ""),
+ )
+
+ # Hotkeys need to be initialized to be passed as thread arguments in hotkeys.py
+ for hotkey in HOTKEYS:
+ setattr(self, f"{hotkey}_hotkey", None)
+
+ # Get default values defined in SettingsDialog
+ self.settings_dict = get_default_settings_from_ui(self)
+ user_profile.load_check_for_updates_on_open(self)
+
+ if self.is_auto_controlled:
+ self.start_auto_splitter_button.setEnabled(False)
+
+ # Send version and process ID to stdout
+ # THESE HAVE TO BE THE FIRST TWO LINES SENT
+ print(f"{AUTOSPLIT_VERSION}\n{os.getpid()}", flush=True)
+
+ # Use and Start the thread that checks for updates from LiveSplit
+ self.update_auto_control = AutoControlledThread(self)
+ self.update_auto_control.start()
+
+ # split image folder line edit text
+ self.split_image_folder_input.setText("No Folder Selected")
+
+ # Connecting menu actions
+ self.action_view_help.triggered.connect(view_help)
+ self.action_about.triggered.connect(lambda: open_about(self))
+ self.action_about_qt.triggered.connect(about_qt)
+ self.action_about_qt_for_python.triggered.connect(about_qt_for_python)
+ self.action_check_for_updates.triggered.connect(lambda: check_for_updates(self))
+ self.action_settings.triggered.connect(lambda: open_settings(self))
+ self.action_save_profile.triggered.connect(lambda: user_profile.save_settings(self))
+ self.action_save_profile_as.triggered.connect(lambda: user_profile.save_settings_as(self))
+ self.action_load_profile.triggered.connect(lambda: user_profile.load_settings(self))
+
+ # Connecting button clicks to functions
+ self.split_image_folder_button.clicked.connect(self.__browse)
+ self.select_region_button.clicked.connect(lambda: select_region(self))
+ self.take_screenshot_button.clicked.connect(self.__take_screenshot)
+ self.start_auto_splitter_button.clicked.connect(self.__auto_splitter)
+ self.check_fps_button.clicked.connect(self.__check_fps)
+ self.reset_button.clicked.connect(self.reset)
+ self.skip_split_button.clicked.connect(self.skip_split)
+ self.undo_split_button.clicked.connect(self.undo_split)
+ self.next_image_button.clicked.connect(lambda: self.skip_split(True))
+ self.previous_image_button.clicked.connect(lambda: self.undo_split(True))
+ self.align_region_button.clicked.connect(lambda: align_region(self))
+ self.select_window_button.clicked.connect(lambda: select_window(self))
+ self.reload_start_image_button.clicked.connect(lambda: self.__load_start_image(True, True))
+ self.action_check_for_updates_on_open.changed.connect(
+ lambda: user_profile.set_check_for_updates_on_open(self, self.action_check_for_updates_on_open.isChecked()),
+ )
+
+ # update x, y, width, and height when the values of these spinboxes are changed
+ self.x_spinbox.valueChanged.connect(self.__update_x)
+ self.y_spinbox.valueChanged.connect(self.__update_y)
+ self.width_spinbox.valueChanged.connect(self.__update_width)
+ self.height_spinbox.valueChanged.connect(self.__update_height)
+
+ # connect signals to functions
+ self.after_setting_hotkey_signal.connect(lambda: after_setting_hotkey(self))
+ self.start_auto_splitter_signal.connect(self.__auto_splitter)
+
+ def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool):
+ return open_update_checker(self, latest_version, check_on_open)
+
+ self.update_checker_widget_signal.connect(_update_checker_widget_signal_slot)
+
+ self.load_start_image_signal.connect(self.__load_start_image)
+ self.reset_signal.connect(self.reset)
+ self.skip_split_signal.connect(self.skip_split)
+ self.undo_split_signal.connect(self.undo_split)
+ self.pause_signal.connect(self.pause)
+ self.screenshot_signal.connect(self.__take_screenshot)
+
+ # live image checkbox
+ self.timer_live_image.timeout.connect(lambda: self.__update_live_image_details(None, True))
+ self.timer_live_image.start(int(1000 / self.settings_dict["fps_limit"]))
+
+ # Automatic timer start
+ self.timer_start_image.timeout.connect(self.__start_image_function)
+
+ self.show()
+
+ try:
+ import pyi_splash # pyright: ignore[reportMissingModuleSource]
+
+ pyi_splash.close()
+ except ModuleNotFoundError:
+ pass
+
+ # Needs to be after Ui_MainWindow.show() to be shown on top
+ if not self.is_auto_controlled:
+ # Must also be done later to help load the saved capture window
+ user_profile.load_settings_on_open(self)
+ if self.action_check_for_updates_on_open.isChecked():
+ check_for_updates(self, check_on_open=True)
+
+ # FUNCTIONS
+
+ def __browse(self):
+ # User selects the file with the split images in it.
+ new_split_image_directory = QFileDialog.getExistingDirectory(
+ self,
+ "Select Split Image Directory",
+ os.path.join(self.settings_dict["split_image_directory"] or auto_split_directory, ".."),
+ )
+
+ # If the user doesn't select a folder, it defaults to "".
+ if new_split_image_directory:
+ # set the split image folder line to the directory text
+ self.settings_dict["split_image_directory"] = new_split_image_directory
+ self.split_image_folder_input.setText(f"{new_split_image_directory}/")
+ self.load_start_image_signal.emit(False, True)
+
+ def __update_live_image_details(self, capture: MatLike | None, called_from_timer: bool = False):
+ # HACK: Since this is also called in __get_capture_for_comparison,
+ # we don't need to update anything if the app is running
+ if called_from_timer:
+ if self.is_running or self.start_image:
+ return
+ capture, _ = self.capture_method.get_frame(self)
+
+ # Update title from target window or Capture Device name
+ capture_region_window_label = (
+ self.settings_dict["capture_device_name"]
+ if self.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE
+ else self.settings_dict["captured_window_title"]
+ )
+ self.capture_region_window_label.setText(capture_region_window_label)
+
+ # Simply clear if "live capture region" setting is off
+ if not (self.settings_dict["live_capture_region"] and capture_region_window_label):
+ self.live_image.clear()
+ # Set live image in UI
+ else:
+ set_preview_image(self.live_image, capture)
+
    def __load_start_image(self, started_by_button: bool = False, wait_for_delay: bool = True):
        """Not thread safe (if triggered by LiveSplit for example). Use `load_start_image_signal.emit` instead."""
        # Stop polling and reset the Start Image UI to a blank / "not found" state
        self.timer_start_image.stop()
        self.current_image_file_label.setText("-")
        self.start_image_status_value_label.setText("not found")
        set_preview_image(self.current_split_image, None)

        # Bail out early if the directory or the images themselves fail validation
        if not (validate_before_parsing(self, started_by_button) and parse_and_validate_images(self)):
            QApplication.processEvents()
            return

        if not self.start_image:
            # Only complain about a missing Start Image on an explicit user action
            if started_by_button:
                error_messages.no_keyword_image(START_KEYWORD)
            QApplication.processEvents()
            return

        self.split_image_number = 0

        # When not waiting for the pause delay, show as "paused" instead of arming the image
        if not wait_for_delay and self.start_image.get_pause_time(self) > 0:
            self.start_image_status_value_label.setText("paused")
            self.table_current_image_highest_label.setText("-")
            self.table_current_image_threshold_label.setText("-")
        else:
            self.start_image_status_value_label.setText("ready")
            self.__update_split_image(self.start_image)

        # Reset comparison bookkeeping and start polling the Start Image at the FPS limit
        self.highest_similarity = 0.0
        self.reset_highest_similarity = 0.0
        self.split_below_threshold = False
        self.timer_start_image.start(int(1000 / self.settings_dict["fps_limit"]))

        QApplication.processEvents()
+
    def __start_image_function(self):
        """Poll the Start Image: compare it with the capture and start the run when it matches."""
        if not self.start_image:
            return

        self.start_image_status_value_label.setText("ready")
        self.__update_split_image(self.start_image)

        capture, _ = self.__get_capture_for_comparison()
        start_image_threshold = self.start_image.get_similarity_threshold(self)
        start_image_similarity = self.start_image.compare_with_capture(self, capture)

        # If the similarity becomes higher than highest similarity, set it as such.
        if start_image_similarity > self.highest_similarity:
            self.highest_similarity = start_image_similarity

        self.table_current_image_live_label.setText(decimal(start_image_similarity))
        self.table_current_image_highest_label.setText(decimal(self.highest_similarity))
        self.table_current_image_threshold_label.setText(decimal(start_image_threshold))

        # If the {b} flag is set, let similarity go above threshold first, then split on similarity below threshold
        # Otherwise just split when similarity goes above threshold
        # TODO: Abstract with similar check in split image
        below_flag = self.start_image.check_flag(BELOW_FLAG)

        # Negative means below threshold, positive means above
        similarity_diff = start_image_similarity - start_image_threshold
        if below_flag and not self.split_below_threshold and similarity_diff >= 0:
            # Armed: wait for similarity to drop back below the threshold
            self.split_below_threshold = True
            return
        if (
            (below_flag and self.split_below_threshold and similarity_diff < 0 and is_valid_image(capture))  # noqa: PLR0916 # See above TODO
            or (not below_flag and similarity_diff >= 0)
        ):
            self.timer_start_image.stop()
            self.split_below_threshold = False

            if not self.start_image.check_flag(DUMMY_FLAG):
                # Delay Start Image if needed
                if self.start_image.get_delay_time(self) > 0:
                    self.start_image_status_value_label.setText("delaying start...")
                    delay_start_time = time()
                    start_delay = self.start_image.get_delay_time(self) / 1000
                    time_delta = 0.0
                    while time_delta < start_delay:
                        delay_time_left = start_delay - time_delta
                        self.current_split_image.setText(
                            f"Delayed Before Starting:\n {seconds_remaining_text(delay_time_left)}",
                        )
                        # Wait 0.1s. Doesn't need to be shorter as we only show 1 decimal
                        QTest.qWait(100)
                        time_delta = time() - delay_start_time
                send_command(self, "start")

            self.start_image_status_value_label.setText("started")
            self.start_auto_splitter()
+
+ # update x, y, width, height when spinbox values are changed
+ def __update_x(self):
+ self.settings_dict["capture_region"]["x"] = self.x_spinbox.value()
+
+ def __update_y(self):
+ self.settings_dict["capture_region"]["y"] = self.y_spinbox.value()
+
+ def __update_width(self):
+ self.settings_dict["capture_region"]["width"] = self.width_spinbox.value()
+
+ def __update_height(self):
+ self.settings_dict["capture_region"]["height"] = self.height_spinbox.value()
+
+ def __take_screenshot(self):
+ if not validate_before_parsing(self, check_empty_directory=False):
+ return
+
+ # Check if file exists and rename it if it does.
+ # Below starts the file_name_number at #001 up to #999. After that it will go to 1000,
+ # which is a problem, but I doubt anyone will get to 1000 split images...
+ screenshot_index = 1
+ while True:
+ screenshot_path = os.path.join(
+ self.settings_dict["screenshot_directory"] or self.settings_dict["split_image_directory"],
+ f"{screenshot_index:03}_SplitImage.png",
+ )
+ if not os.path.exists(screenshot_path):
+ break
+ screenshot_index += 1
+
+ # Grab screenshot of capture region
+ capture, _ = self.capture_method.get_frame(self)
+ if not is_valid_image(capture):
+ error_messages.region()
+ return
+
+ # Save and open image
+ cv2.imwrite(screenshot_path, capture)
+ if self.settings_dict["open_screenshot"]:
+ open_file(screenshot_path)
+
+ def __check_fps(self):
+ self.fps_value_label.setText("...")
+ QApplication.processEvents()
+ if not (validate_before_parsing(self) and parse_and_validate_images(self)):
+ self.fps_value_label.clear()
+ return
+
+ images = self.split_images
+ if self.start_image:
+ images.append(self.start_image)
+ if self.reset_image:
+ images.append(self.reset_image)
+
+ # run X iterations of screenshotting capture region + comparison + displaying.
+ t0 = time()
+ for image in images:
+ count = 0
+ while count < CHECK_FPS_ITERATIONS:
+ capture, is_old_image = self.__get_capture_for_comparison()
+ _ = image.compare_with_capture(self, capture)
+ # TODO: If is_old_image=true is always returned, this becomes an infinite loop
+ if not is_old_image:
+ count += 1
+
+ # calculate FPS
+ t1 = time()
+ fps = int((CHECK_FPS_ITERATIONS * len(images)) / (t1 - t0))
+ self.fps_value_label.setText(str(fps))
+
+ def __is_current_split_out_of_range(self):
+ return self.split_image_number < 0 \
+ or self.split_image_number > len(self.split_images_and_loop_number) - 1
+
+ def undo_split(self, navigate_image_only: bool = False):
+ """Undo Split" and "Prev. Img." buttons connect to here."""
+ # Can't undo until timer is started
+ # or Undoing past the first image
+ if (
+ not self.is_running
+ or "Delayed Split" in self.current_split_image.text()
+ or (not self.undo_split_button.isEnabled() and not self.is_auto_controlled)
+ or self.__is_current_split_out_of_range()
+ ):
+ return
+
+ if not navigate_image_only:
+ for i, group in enumerate(self.split_groups):
+ if i > 0 and self.split_image_number in group:
+ self.split_image_number = self.split_groups[i - 1][-1]
+ break
+ else:
+ self.split_image_number -= 1
+
+ self.__update_split_image()
+ if not navigate_image_only:
+ send_command(self, "undo")
+
+ def skip_split(self, navigate_image_only: bool = False):
+ """Skip Split" and "Next Img." buttons connect to here."""
+ # Can't skip or split until timer is started
+ # or Splitting/skipping when there are no images left
+ if (
+ not self.is_running
+ or "Delayed Split" in self.current_split_image.text()
+ or not (self.skip_split_button.isEnabled() or self.is_auto_controlled or navigate_image_only)
+ or self.__is_current_split_out_of_range()
+ ):
+ return
+
+ if not navigate_image_only:
+ for group in self.split_groups:
+ if self.split_image_number in group:
+ self.split_image_number = group[-1] + 1
+ break
+ else:
+ self.split_image_number += 1
+
+ self.__update_split_image()
+ if not navigate_image_only:
+ send_command(self, "skip")
+
    def pause(self):
        """Pause hotkey handler (currently a no-op)."""
        # TODO: add what to do when you hit pause hotkey, if this even needs to be done
        pass
+
+ def reset(self):
+ """
+ When the reset button or hotkey is pressed, it will set `is_running` to False,
+ which will trigger in the __auto_splitter function, if running, to abort and change GUI.
+ """
+ self.is_running = False
+
+ # Functions for the hotkeys to return to the main thread from signals and start their corresponding functions
+ def start_auto_splitter(self):
+ # If the auto splitter is already running or the button is disabled, don't emit the signal to start it.
+ if self.is_running \
+ or (not self.start_auto_splitter_button.isEnabled() and not self.is_auto_controlled):
+ return
+
+ start_label: str = self.start_image_status_value_label.text()
+ if start_label.endswith(("ready", "paused")):
+ self.start_image_status_value_label.setText("not ready")
+
+ self.start_auto_splitter_signal.emit()
+
+ def __check_for_reset_state_update_ui(self):
+ """Check if AutoSplit is started, if not then update the GUI."""
+ if not self.is_running:
+ self.gui_changes_on_reset(True)
+ return True
+ return False
+
+ def __auto_splitter(self): # noqa: PLR0912,PLR0915
+ if not self.settings_dict["split_hotkey"] and not self.is_auto_controlled:
+ self.gui_changes_on_reset(True)
+ error_messages.split_hotkey()
+ return
+
+ # Set start time before parsing the images as it's a heavy operation that will cause delays
+ self.run_start_time = time()
+
+ if not (validate_before_parsing(self) and parse_and_validate_images(self)):
+ # `safe_to_reload_start_image: bool = False` becasue __load_start_image also does this check,
+ # we don't want to double a Start/Reset Image error message
+ self.gui_changes_on_reset(False)
+ return
+
+ # Construct a list of images + loop count tuples.
+ self.split_images_and_loop_number = list(
+ flatten(
+ ((split_image, i + 1) for i in range(split_image.loops))
+ for split_image
+ in self.split_images
+ ),
+ )
+
+ # Construct groups of splits
+ self.split_groups = []
+ dummy_splits_array = []
+ number_of_split_images = len(self.split_images_and_loop_number)
+ current_group: list[int] = []
+ self.split_groups.append(current_group)
+ for i, image in enumerate(self.split_images_and_loop_number):
+ current_group.append(i)
+ dummy = image[0].check_flag(DUMMY_FLAG)
+ dummy_splits_array.append(dummy)
+ if not dummy and i < number_of_split_images - 1:
+ current_group = []
+ self.split_groups.append(current_group)
+
+ self.is_running = True
+ self.gui_changes_on_start()
+
+ # Start pause time
+ if self.start_image:
+ self.__pause_loop(self.start_image.get_pause_time(self), "None (Paused).")
+
+ # Initialize a few attributes
+ self.split_image_number = 0
+ self.waiting_for_split_delay = False
+ self.split_below_threshold = False
+ split_time = 0
+
+ # First loop: stays in this loop until all of the split images have been split
+ while self.split_image_number < number_of_split_images:
+ # Check if we are not waiting for the split delay to send the key press
+ if self.waiting_for_split_delay:
+ time_millis = int(round(time() * 1000))
+ if time_millis < split_time:
+ QApplication.processEvents()
+ continue
+
+ self.__update_split_image()
+
+ # Type checking
+ if not self.split_image:
+ return
+
+ # Second loop: stays in this loop until similarity threshold is met
+ if self.__similarity_threshold_loop(number_of_split_images, dummy_splits_array):
+ return
+
+ # We need to make sure that this isn't a dummy split before sending the key press.
+ if not self.split_image.check_flag(DUMMY_FLAG):
+ # If it's a delayed split, check if the delay has passed
+ # Otherwise calculate the split time for the key press
+ split_delay = self.split_image.get_delay_time(self) / 1000
+ if split_delay > 0 and not self.waiting_for_split_delay:
+ split_time = round(time() + split_delay * 1000)
+ self.waiting_for_split_delay = True
+ buttons_to_disable = [
+ self.next_image_button,
+ self.previous_image_button,
+ self.undo_split_button,
+ self.skip_split_button,
+ ]
+ for button in buttons_to_disable:
+ button.setEnabled(False)
+ self.current_image_file_label.clear()
+
+ # check for reset while delayed and display a counter of the remaining split delay time
+ if self.__pause_loop(split_delay, "Delayed Split:"):
+ return
+
+ for button in buttons_to_disable:
+ button.setEnabled(True)
+
+ self.waiting_for_split_delay = False
+
+ # if {p} flag hit pause key, otherwise hit split hotkey
+ send_command(self, "pause" if self.split_image.check_flag(PAUSE_FLAG) else "split")
+
+ # if loop check box is checked and its the last split, go to first split.
+ # else go to the next split image.
+ if self.settings_dict["loop_splits"] and self.split_image_number == number_of_split_images - 1:
+ self.split_image_number = 0
+ else:
+ self.split_image_number += 1
+
+ # If its not the last split image, pause for the amount set by the user
+ # A pause loop to check if the user presses skip split, undo split, or reset here.
+ # Also updates the current split image text, counting down the time until the next split image
+ if self.__pause_loop(self.split_image.get_pause_time(self), "None (Paused)."):
+ return
+
+ # loop breaks to here when the last image splits
+ self.is_running = False
+ self.gui_changes_on_reset(True)
+
    def __similarity_threshold_loop(self, number_of_split_images: int, dummy_splits_array: list[bool]):
        """
        Wait until the similarity threshold is met.

        Returns True if the loop was interrupted by a reset.
        """
        # Type checking
        if not self.split_image:
            return False

        start = time()
        while True:
            capture, _ = self.__get_capture_for_comparison()

            if self.__reset_if_should(capture):
                return True

            similarity = self.split_image.compare_with_capture(self, capture)

            # Show live similarity
            self.table_current_image_live_label.setText(decimal(similarity))

            # if the similarity becomes higher than highest similarity, set it as such.
            if similarity > self.highest_similarity:
                self.highest_similarity = similarity

            # show live highest similarity if the checkbox is checked
            self.table_current_image_highest_label.setText(decimal(self.highest_similarity))

            # If its the last split image and last loop number, disable the next image button
            # If its the first split image, disable the undo split and previous image buttons
            self.next_image_button.setEnabled(self.split_image_number != number_of_split_images - 1)
            self.previous_image_button.setEnabled(self.split_image_number != 0)
            if not self.is_auto_controlled:
                # If its the last non-dummy split image and last loop number, disable the skip split button
                self.skip_split_button.setEnabled(dummy_splits_array[self.split_image_number:].count(False) > 1)
                self.undo_split_button.setEnabled(self.split_image_number != 0)
            QApplication.processEvents()

            # Limit the number of time the comparison runs to reduce cpu usage
            frame_interval = 1 / self.settings_dict["fps_limit"]
            # Use a time delta to have a consistent check interval
            wait_delta_ms = int((frame_interval - (time() - start) % frame_interval) * 1000)

            below_flag = self.split_image.check_flag(BELOW_FLAG)
            # if the b flag is set, let similarity go above threshold first,
            # then split on similarity below threshold.
            # if no b flag, just split when similarity goes above threshold.
            # TODO: Abstract with similar check in Start Image
            if not self.waiting_for_split_delay:
                if similarity >= self.split_image.get_similarity_threshold(self):
                    if not below_flag:
                        break
                    if not self.split_below_threshold:
                        # Armed: now wait for similarity to drop back below the threshold
                        self.split_below_threshold = True
                        QTest.qWait(wait_delta_ms)
                        continue

                elif below_flag and self.split_below_threshold and is_valid_image(capture):
                    self.split_below_threshold = False
                    break

            QTest.qWait(wait_delta_ms)

        return False
+
    def __pause_loop(self, stop_time: float, message: str):
        """
        Wait for a certain time and show the timer to the user.
        Can be stopped early if the current split goes past the one when the loop started.

        Returns True if the loop was interrupted by a reset.
        """
        if stop_time <= 0:
            return False
        start_time = time()
        # Set a "pause" split image number.
        # This is done so that it can detect if user hit split/undo split while paused/delayed.
        pause_split_image_number = self.split_image_number
        while True:
            # Calculate similarity for Reset Image
            if self.__reset_if_should(self.__get_capture_for_comparison()[0]):
                return True

            time_delta = time() - start_time
            if (
                # Check for end of the pause/delay
                time_delta >= stop_time
                # Check for skip split / next image:
                or self.split_image_number > pause_split_image_number
                # Check for undo split / previous image:
                or self.split_image_number < pause_split_image_number
            ):
                break

            # Show the countdown, e.g. "None (Paused). 3.2 seconds remaining"
            self.current_split_image.setText(f"{message} {seconds_remaining_text(stop_time - time_delta)}")

            # Yield to the Qt event loop so the UI stays responsive
            QTest.qWait(1)
        return False
+
+ def gui_changes_on_start(self):
+ self.timer_start_image.stop()
+ self.start_auto_splitter_button.setText("Running...")
+ self.split_image_folder_button.setEnabled(False)
+ self.reload_start_image_button.setEnabled(False)
+ self.previous_image_button.setEnabled(True)
+ self.next_image_button.setEnabled(True)
+
+ # TODO: Do we actually need to disable setting new hotkeys once started?
+ # What does this achieve? (See below TODO)
+ if self.SettingsWidget:
+ for hotkey in HOTKEYS:
+ getattr(self.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False)
+
+ if not self.is_auto_controlled:
+ self.start_auto_splitter_button.setEnabled(False)
+ self.reset_button.setEnabled(True)
+ self.undo_split_button.setEnabled(True)
+ self.skip_split_button.setEnabled(True)
+
+ QApplication.processEvents()
+
+ def gui_changes_on_reset(self, safe_to_reload_start_image: bool = False):
+ self.start_auto_splitter_button.setText("Start Auto Splitter")
+ self.image_loop_value_label.setText("N/A")
+ self.current_split_image.clear()
+ self.current_image_file_label.clear()
+ self.table_current_image_live_label.setText("-")
+ self.table_current_image_highest_label.setText("-")
+ self.table_current_image_threshold_label.setText("-")
+ self.table_reset_image_live_label.setText("-")
+ self.table_reset_image_highest_label.setText("-")
+ self.table_reset_image_threshold_label.setText("-")
+ self.split_image_folder_button.setEnabled(True)
+ self.reload_start_image_button.setEnabled(True)
+ self.previous_image_button.setEnabled(False)
+ self.next_image_button.setEnabled(False)
+
+ # TODO: Do we actually need to disable setting new hotkeys once started?
+ # What does this achieve? (see above TODO)
+ if self.SettingsWidget and not self.is_auto_controlled:
+ for hotkey in HOTKEYS:
+ getattr(self.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True)
+
+ if not self.is_auto_controlled:
+ self.start_auto_splitter_button.setEnabled(True)
+ self.reset_button.setEnabled(False)
+ self.undo_split_button.setEnabled(False)
+ self.skip_split_button.setEnabled(False)
+
+ QApplication.processEvents()
+ if safe_to_reload_start_image:
+ self.load_start_image_signal.emit(False, False)
+
+ def __get_capture_for_comparison(self):
+ """Grab capture region and resize for comparison."""
+ capture, is_old_image = self.capture_method.get_frame(self)
+
+ # This most likely means we lost capture
+ # (ie the captured window was closed, crashed, lost capture device, etc.)
+ if not is_valid_image(capture):
+ # Try to recover by using the window name
+ if self.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
+ self.live_image.setText("Waiting for capture device...")
+ else:
+ self.live_image.setText("Trying to recover window...")
+ recovered = self.capture_method.recover_window(self.settings_dict["captured_window_title"], self)
+ if recovered:
+ capture, _ = self.capture_method.get_frame(self)
+
+ self.__update_live_image_details(capture)
+ return capture, is_old_image
+
    def __reset_if_should(self, capture: MatLike | None):
        """Checks if we should reset, resets if it's the case, and returns the result."""
        if self.reset_image:
            if self.settings_dict["enable_auto_reset"]:
                similarity = self.reset_image.compare_with_capture(self, capture)
                threshold = self.reset_image.get_similarity_threshold(self)

                # Don't reset while still within the Start/Reset Image pause window of the run
                pause_times = [self.reset_image.get_pause_time(self)]
                if self.start_image:
                    pause_times.append(self.start_image.get_pause_time(self))
                paused = time() - self.run_start_time <= max(pause_times)
                if paused:
                    should_reset = False
                    self.table_reset_image_live_label.setText("paused")
                else:
                    should_reset = similarity >= threshold
                    if similarity > self.reset_highest_similarity:
                        self.reset_highest_similarity = similarity
                    self.table_reset_image_highest_label.setText(decimal(self.reset_highest_similarity))
                    self.table_reset_image_live_label.setText(decimal(similarity))

                self.table_reset_image_threshold_label.setText(decimal(threshold))

                if should_reset:
                    send_command(self, "reset")
                    self.reset()
            else:
                self.table_reset_image_live_label.setText("disabled")
        else:
            # No Reset Image parsed at all
            self.table_reset_image_live_label.setText("N/A")
            self.table_reset_image_threshold_label.setText("N/A")
            self.table_reset_image_highest_label.setText("N/A")

        return self.__check_for_reset_state_update_ui()
+
+ def __update_split_image(self, specific_image: AutoSplitImage | None = None):
+ # Start image is expected to be out of range (index 0 of 0-length array)
+ if not specific_image or specific_image.image_type != ImageType.START:
+ # need to reset highest_similarity and split_below_threshold each time an image updates.
+ self.highest_similarity = 0.0
+ self.split_below_threshold = False
+ # Splitting/skipping when there are no images left or Undoing past the first image
+ if self.__is_current_split_out_of_range():
+ self.reset()
+ return
+
+ # Get split image
+ self.split_image = specific_image or self.split_images_and_loop_number[0 + self.split_image_number][0]
+ if is_valid_image(self.split_image.byte_array):
+ set_preview_image(self.current_split_image, self.split_image.byte_array)
+
+ self.current_image_file_label.setText(self.split_image.filename)
+ self.table_current_image_threshold_label.setText(decimal(self.split_image.get_similarity_threshold(self)))
+
+ # Set Image Loop number
+ if specific_image and specific_image.image_type == ImageType.START:
+ self.image_loop_value_label.setText("N/A")
+ else:
+ loop_tuple = self.split_images_and_loop_number[self.split_image_number]
+ self.image_loop_value_label.setText(f"{loop_tuple[1]}/{loop_tuple[0].loops}")
+
    @override
    def closeEvent(self, event: QtGui.QCloseEvent | None = None):
        """Exit safely when closing the window."""

        def exit_program() -> NoReturn:
            # Tear down background resources before exiting
            if self.update_auto_control:
                # self.update_auto_control.terminate() hangs in PySide6
                self.update_auto_control.quit()
            self.capture_method.close(self)
            if event is not None:
                event.accept()
            if self.is_auto_controlled:
                # stop main thread (which is probably blocked reading input) via an interrupt signal
                os.kill(os.getpid(), signal.SIGINT)
            sys.exit()

        # `event is None` simulates LiveSplit quitting without asking.
        # This also more gracefully exits LiveSplit
        # Users can still manually save their settings
        if event is None or not user_profile.have_settings_changed(self):
            exit_program()

        # Give a different warning if there was never a settings file that was loaded successfully,
        # and "save as" instead of "save".
        settings_file_name = (
            "Untitled"
            if not self.last_successfully_loaded_settings_file_path
            else os.path.basename(self.last_successfully_loaded_settings_file_path)
        )

        # Ask whether to save changes before closing
        warning = QMessageBox.warning(
            self,
            "AutoSplit",
            f"Do you want to save changes made to settings file {settings_file_name}?",
            QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No | QMessageBox.StandardButton.Cancel,
        )

        if (
            (warning is QMessageBox.StandardButton.Yes and user_profile.save_settings(self))
            or warning is QMessageBox.StandardButton.No
        ):
            exit_program()

        # Fallthrough case: Prevent program from closing.
        event.ignore()
+
+
def set_preview_image(qlabel: QLabel, image: MatLike | None):
    """Render `image` into `qlabel` scaled to the label's size, or clear the pixmap."""
    if not is_valid_image(image):
        # Clear current pixmap if no image. But don't clear text
        if not qlabel.text():
            qlabel.clear()
        return

    height, width, channels = image.shape

    if channels == BGRA_CHANNEL_COUNT:
        image_format = QtGui.QImage.Format.Format_RGBA8888
        capture = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
    else:
        image_format = QtGui.QImage.Format.Format_BGR888
        capture = image

    qimage = QtGui.QImage(
        capture.data,  # pyright: ignore[reportGeneralTypeIssues] # https://bugreports.qt.io/browse/PYSIDE-2476
        width,
        height,
        width * channels,
        image_format,
    )
    scaled_pixmap = QtGui.QPixmap(qimage).scaled(
        qlabel.size(),
        QtCore.Qt.AspectRatioMode.IgnoreAspectRatio,
        QtCore.Qt.TransformationMode.SmoothTransformation,
    )
    qlabel.setPixmap(scaled_pixmap)
+
+
def seconds_remaining_text(seconds: float):
    """Format a countdown, e.g. "2.4 seconds remaining" (singular up to exactly 1 second)."""
    unit = "second" if 0 < seconds <= 1 else "seconds"
    return f"{seconds:.1f} {unit} remaining"
+
+
def is_already_open():
    """Whether another AutoSplit instance is running (bundled builds also see themselves and the splash screen)."""
    # When running directly in Python, any AutoSplit process means it's already open
    # When bundled, we must ignore itself and the splash screen
    max_processes = 3 if FROZEN else 1
    autosplit_process_count = 0
    for process in process_iter():
        if process.name() != "AutoSplit.exe":
            continue
        autosplit_process_count += 1
        if autosplit_process_count >= max_processes:
            return True
    return False
+
+
def main():
    """Create the Qt application, open AutoSplit, and run the event loop until exit."""
    # Best to call setStyle before the QApplication constructor
    # https://doc.qt.io/qt-6/qapplication.html#setStyle-1
    QApplication.setStyle("fusion")
    # Call to QApplication outside the try-except so we can show error messages
    app = QApplication(sys.argv)
    try:
        app.setWindowIcon(QtGui.QIcon(":/resources/icon.ico"))

        if is_already_open():
            error_messages.already_open()

        AutoSplit()

        if not FROZEN:
            # Kickoff the event loop every so often so we can handle KeyboardInterrupt (^C)
            timer = QtCore.QTimer()
            timer.timeout.connect(lambda: None)
            timer.start(500)

        exit_code = app.exec()
    except Exception as exception:  # noqa: BLE001 # We really want to catch everything here
        # NOTE(review): `exit_code` is only bound on the success path. This assumes
        # handle_top_level_exceptions never returns (i.e. it exits the process itself);
        # confirm, otherwise the sys.exit below would raise NameError.
        error_messages.handle_top_level_exceptions(exception)

    # Catch Keyboard Interrupts for a clean close
    signal.signal(signal.SIGINT, lambda code, _: sys.exit(code))

    sys.exit(exit_code)
+
+
# Script entry point
if __name__ == "__main__":
    main()
diff --git a/src/AutoSplitImage.py b/src/AutoSplitImage.py
index 34d40851..4b32888b 100644
--- a/src/AutoSplitImage.py
+++ b/src/AutoSplitImage.py
@@ -1,170 +1,165 @@
-from __future__ import annotations
-
-import os
-from enum import IntEnum
-from math import sqrt
-from typing import TYPE_CHECKING
-
-import cv2
-import numpy as np
-from cv2.typing import MatLike
-
-import error_messages
-from compare import COMPARE_METHODS_BY_INDEX, check_if_image_has_transparency
-from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image
-
-if TYPE_CHECKING:
-
- from AutoSplit import AutoSplit
-
-
-# Resize to these width and height so that FPS performance increases
-COMPARISON_RESIZE_WIDTH = 320
-COMPARISON_RESIZE_HEIGHT = 240
-COMPARISON_RESIZE = (COMPARISON_RESIZE_WIDTH, COMPARISON_RESIZE_HEIGHT)
-COMPARISON_RESIZE_AREA = COMPARISON_RESIZE_WIDTH * COMPARISON_RESIZE_HEIGHT
-MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype="uint8")
-MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype="uint8")
-START_KEYWORD = "start_auto_splitter"
-RESET_KEYWORD = "reset"
-
-
-class ImageType(IntEnum):
- SPLIT = 0
- RESET = 1
- START = 2
-
-
-class AutoSplitImage:
- path: str
- filename: str
- flags: int
- loops: int
- image_type: ImageType
- byte_array: MatLike | None = None
- mask: MatLike | None = None
- # This value is internal, check for mask instead
- _has_transparency = False
- # These values should be overriden by some Defaults if None. Use getters instead
- __delay_time: float | None = None
- __comparison_method: int | None = None
- __pause_time: float | None = None
- __similarity_threshold: float | None = None
-
- def get_delay_time(self, default: AutoSplit | int):
- """Get image's delay time or fallback to the default value from spinbox."""
- if self.__delay_time is not None:
- return self.__delay_time
- if isinstance(default, int):
- return default
- return default.settings_dict["default_delay_time"]
-
- def __get_comparison_method(self, default: AutoSplit | int):
- """Get image's comparison or fallback to the default value from combobox."""
- if self.__comparison_method is not None:
- return self.__comparison_method
- if isinstance(default, int):
- return default
- return default.settings_dict["default_comparison_method"]
-
- def get_pause_time(self, default: AutoSplit | float):
- """Get image's pause time or fallback to the default value from spinbox."""
- if self.__pause_time is not None:
- return self.__pause_time
- if isinstance(default, (float, int)):
- return default
- return default.settings_dict["default_pause_time"]
-
- def get_similarity_threshold(self, default: AutoSplit | float):
- """Get image's similarity threshold or fallback to the default value from spinbox."""
- if self.__similarity_threshold is not None:
- return self.__similarity_threshold
- if isinstance(default, (float, int)):
- return default
- return default.settings_dict["default_similarity_threshold"]
-
- def __init__(self, path: str):
- self.path = path
- self.filename = os.path.split(path)[-1].lower()
- self.flags = flags_from_filename(self.filename)
- self.loops = loop_from_filename(self.filename)
- self.__delay_time = delay_time_from_filename(self.filename)
- self.__comparison_method = comparison_method_from_filename(self.filename)
- self.__pause_time = pause_from_filename(self.filename)
- self.__similarity_threshold = threshold_from_filename(self.filename)
- self.__read_image_bytes(path)
-
- if START_KEYWORD in self.filename:
- self.image_type = ImageType.START
- elif RESET_KEYWORD in self.filename:
- self.image_type = ImageType.RESET
- else:
- self.image_type = ImageType.SPLIT
-
- def __read_image_bytes(self, path: str):
- image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
- if not is_valid_image(image):
- self.byte_array = None
- error_messages.image_type(path)
- return
-
- self._has_transparency = check_if_image_has_transparency(image)
- # If image has transparency, create a mask
- if self._has_transparency:
- # Adaptively determine the target size according to
- # the number of nonzero elements in the alpha channel of the split image.
- # This may result in images bigger than COMPARISON_RESIZE if there's plenty of transparency.
- # Which wouldn't incur any performance loss in methods where masked regions are ignored.
- scale = min(1, sqrt(COMPARISON_RESIZE_AREA / cv2.countNonZero(image[:, :, ColorChannel.Alpha])))
-
- image = cv2.resize(
- image,
- dsize=None,
- fx=scale,
- fy=scale,
- interpolation=cv2.INTER_NEAREST,
- )
-
- # Mask based on adaptively resized, nearest neighbor interpolated split image
- self.mask = cv2.inRange(image, MASK_LOWER_BOUND, MASK_UPPER_BOUND)
- else:
- image = cv2.resize(image, COMPARISON_RESIZE, interpolation=cv2.INTER_NEAREST)
- # Add Alpha channel if missing
- if image.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT:
- image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
-
- self.byte_array = image
-
- def check_flag(self, flag: int):
- return self.flags & flag == flag
-
- def compare_with_capture(
- self,
- default: AutoSplit | int,
- capture: MatLike | None,
- ):
- """Compare image with capture using image's comparison method. Falls back to combobox."""
- if not is_valid_image(self.byte_array) or not is_valid_image(capture):
- return 0.0
- resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1])
- comparison_method = self.__get_comparison_method(default)
-
- return COMPARE_METHODS_BY_INDEX.get(
- comparison_method, compare_dummy,
- )(
- self.byte_array, resized_capture, self.mask,
- )
-
-
-def compare_dummy(*_: object): return 0.0
-
-
-if True:
- from split_parser import (
- comparison_method_from_filename,
- delay_time_from_filename,
- flags_from_filename,
- loop_from_filename,
- pause_from_filename,
- threshold_from_filename,
- )
+import os
+from enum import IntEnum, auto
+from math import sqrt
+from typing import TYPE_CHECKING
+
+import cv2
+import numpy as np
+from cv2.typing import MatLike
+
+import error_messages
+from compare import check_if_image_has_transparency, get_comparison_method_by_index
+from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+
+# Resize to these width and height so that FPS performance increases
+COMPARISON_RESIZE_WIDTH = 320
+COMPARISON_RESIZE_HEIGHT = 240
+COMPARISON_RESIZE = (COMPARISON_RESIZE_WIDTH, COMPARISON_RESIZE_HEIGHT)
+COMPARISON_RESIZE_AREA = COMPARISON_RESIZE_WIDTH * COMPARISON_RESIZE_HEIGHT
+MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype="uint8")
+MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype="uint8")
+START_KEYWORD = "start_auto_splitter"
+RESET_KEYWORD = "reset"
+
+
+class ImageType(IntEnum):
+ SPLIT = auto()
+ RESET = auto()
+ START = auto()
+
+
+class AutoSplitImage:
+ path: str
+ filename: str
+ flags: int
+ loops: int
+ image_type: ImageType
+ byte_array: MatLike | None = None
+ mask: MatLike | None = None
+ # This value is internal, check for mask instead
+ _has_transparency = False
+    # These values should be overridden by some Defaults if None. Use getters instead
+ __delay_time: float | None = None
+ __comparison_method: int | None = None
+ __pause_time: float | None = None
+ __similarity_threshold: float | None = None
+
+ def get_delay_time(self, default: "AutoSplit | int"):
+ """Get image's delay time or fallback to the default value from spinbox."""
+ if self.__delay_time is not None:
+ return self.__delay_time
+ if isinstance(default, int):
+ return default
+ return default.settings_dict["default_delay_time"]
+
+ def __get_comparison_method_index(self, default: "AutoSplit | int"):
+ """Get image's comparison or fallback to the default value from combobox."""
+ if self.__comparison_method is not None:
+ return self.__comparison_method
+ if isinstance(default, int):
+ return default
+ return default.settings_dict["default_comparison_method"]
+
+ def get_pause_time(self, default: "AutoSplit | float"):
+ """Get image's pause time or fallback to the default value from spinbox."""
+ if self.__pause_time is not None:
+ return self.__pause_time
+ if isinstance(default, (float, int)):
+ return default
+ return default.settings_dict["default_pause_time"]
+
+ def get_similarity_threshold(self, default: "AutoSplit | float"):
+ """Get image's similarity threshold or fallback to the default value from spinbox."""
+ if self.__similarity_threshold is not None:
+ return self.__similarity_threshold
+ if isinstance(default, (float, int)):
+ return default
+ return default.settings_dict["default_similarity_threshold"]
+
+ def __init__(self, path: str):
+ self.path = path
+ self.filename = os.path.split(path)[-1].lower()
+ self.flags = flags_from_filename(self.filename)
+ self.loops = loop_from_filename(self.filename)
+ self.__delay_time = delay_time_from_filename(self.filename)
+ self.__comparison_method = comparison_method_from_filename(self.filename)
+ self.__pause_time = pause_from_filename(self.filename)
+ self.__similarity_threshold = threshold_from_filename(self.filename)
+ self.__read_image_bytes(path)
+
+ if START_KEYWORD in self.filename:
+ self.image_type = ImageType.START
+ elif RESET_KEYWORD in self.filename:
+ self.image_type = ImageType.RESET
+ else:
+ self.image_type = ImageType.SPLIT
+
+ def __read_image_bytes(self, path: str):
+ image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
+ if not is_valid_image(image):
+ self.byte_array = None
+ error_messages.image_type(path)
+ return
+
+ self._has_transparency = check_if_image_has_transparency(image)
+ # If image has transparency, create a mask
+ if self._has_transparency:
+ # Adaptively determine the target size according to
+ # the number of nonzero elements in the alpha channel of the split image.
+ # This may result in images bigger than COMPARISON_RESIZE if there's plenty of transparency.
+ # Which wouldn't incur any performance loss in methods where masked regions are ignored.
+ scale = min(1, sqrt(COMPARISON_RESIZE_AREA / cv2.countNonZero(image[:, :, ColorChannel.Alpha])))
+
+ image = cv2.resize(
+ image,
+ dsize=None,
+ fx=scale,
+ fy=scale,
+ interpolation=cv2.INTER_NEAREST,
+ )
+
+ # Mask based on adaptively resized, nearest neighbor interpolated split image
+ self.mask = cv2.inRange(image, MASK_LOWER_BOUND, MASK_UPPER_BOUND)
+ else:
+ image = cv2.resize(image, COMPARISON_RESIZE, interpolation=cv2.INTER_NEAREST)
+ # Add Alpha channel if missing
+ if image.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT:
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
+
+ self.byte_array = image
+
+ def check_flag(self, flag: int):
+ return self.flags & flag == flag
+
+ def compare_with_capture(
+ self,
+ default: "AutoSplit | int",
+ capture: MatLike | None,
+ ):
+ """Compare image with capture using image's comparison method. Falls back to combobox."""
+ if not is_valid_image(self.byte_array) or not is_valid_image(capture):
+ return 0.0
+ resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1])
+
+ return get_comparison_method_by_index(
+ self.__get_comparison_method_index(default),
+ )(
+ self.byte_array,
+ resized_capture,
+ self.mask,
+ )
+
+
+if True:
+ from split_parser import (
+ comparison_method_from_filename,
+ delay_time_from_filename,
+ flags_from_filename,
+ loop_from_filename,
+ pause_from_filename,
+ threshold_from_filename,
+ )
diff --git a/src/capture_method/BitBltCaptureMethod.py b/src/capture_method/BitBltCaptureMethod.py
index 887a1f53..ff5d0542 100644
--- a/src/capture_method/BitBltCaptureMethod.py
+++ b/src/capture_method/BitBltCaptureMethod.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import ctypes
import ctypes.wintypes
from typing import TYPE_CHECKING, cast
@@ -16,7 +14,6 @@
from utils import BGRA_CHANNEL_COUNT, get_window_bounds, is_valid_hwnd, try_delete_dc
if TYPE_CHECKING:
-
from AutoSplit import AutoSplit
# This is an undocumented nFlag value for PrintWindow
@@ -35,7 +32,7 @@ class BitBltCaptureMethod(CaptureMethodBase):
_render_full_content = False
@override
- def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]:
+ def get_frame(self, autosplit: "AutoSplit") -> tuple[MatLike | None, bool]:
selection = autosplit.settings_dict["capture_region"]
hwnd = autosplit.hwnd
image: MatLike | None = None
@@ -80,7 +77,7 @@ def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]:
return image, False
@override
- def recover_window(self, captured_window_title: str, autosplit: AutoSplit):
+ def recover_window(self, captured_window_title: str, autosplit: "AutoSplit"):
hwnd = win32gui.FindWindow(None, captured_window_title)
if not is_valid_hwnd(hwnd):
return False
diff --git a/src/capture_method/CaptureMethodBase.py b/src/capture_method/CaptureMethodBase.py
index 13b822d6..4bea1a5c 100644
--- a/src/capture_method/CaptureMethodBase.py
+++ b/src/capture_method/CaptureMethodBase.py
@@ -1,44 +1,41 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from cv2.typing import MatLike
-
-from utils import is_valid_hwnd
-
-if TYPE_CHECKING:
-
- from AutoSplit import AutoSplit
-
-
-class CaptureMethodBase:
- name = "None"
- short_description = ""
- description = ""
-
- def __init__(self, autosplit: AutoSplit | None):
- # Some capture methods don't need an initialization process
- pass
-
- def reinitialize(self, autosplit: AutoSplit):
- self.close(autosplit)
- self.__init__(autosplit) # type: ignore[misc]
-
- def close(self, autosplit: AutoSplit):
- # Some capture methods don't need an initialization process
- pass
-
- def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: # noqa: PLR6301
- """
- Captures an image of the region for a window matching the given
- parameters of the bounding box.
-
- @return: The image of the region in the window in BGRA format
- """
- return None, False
-
- def recover_window(self, captured_window_title: str, autosplit: AutoSplit) -> bool: # noqa: PLR6301
- return False
-
- def check_selected_region_exists(self, autosplit: AutoSplit) -> bool: # noqa: PLR6301
- return is_valid_hwnd(autosplit.hwnd)
+from typing import TYPE_CHECKING
+
+from cv2.typing import MatLike
+
+from utils import is_valid_hwnd
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+
+class CaptureMethodBase:
+ name = "None"
+ short_description = ""
+ description = ""
+
+ def __init__(self, autosplit: "AutoSplit | None"):
+ # Some capture methods don't need an initialization process
+ pass
+
+ def reinitialize(self, autosplit: "AutoSplit"):
+ self.close(autosplit)
+ self.__init__(autosplit) # type: ignore[misc]
+
+ def close(self, autosplit: "AutoSplit"):
+ # Some capture methods don't need an initialization process
+ pass
+
+ def get_frame(self, autosplit: "AutoSplit") -> tuple[MatLike | None, bool]: # noqa: PLR6301
+ """
+ Captures an image of the region for a window matching the given
+ parameters of the bounding box.
+
+ @return: The image of the region in the window in BGRA format
+ """
+ return None, False
+
+ def recover_window(self, captured_window_title: str, autosplit: "AutoSplit") -> bool: # noqa: PLR6301
+ return False
+
+ def check_selected_region_exists(self, autosplit: "AutoSplit") -> bool: # noqa: PLR6301
+ return is_valid_hwnd(autosplit.hwnd)
diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py
index ee07ac11..4e4a2525 100644
--- a/src/capture_method/DesktopDuplicationCaptureMethod.py
+++ b/src/capture_method/DesktopDuplicationCaptureMethod.py
@@ -1,65 +1,63 @@
-from __future__ import annotations
-
-import ctypes
-from typing import TYPE_CHECKING, Union, cast
-
-import cv2
-import d3dshot
-import numpy as np
-import win32con
-from typing_extensions import override
-from win32 import win32gui
-
-from capture_method.BitBltCaptureMethod import BitBltCaptureMethod
-from utils import GITHUB_REPOSITORY, get_window_bounds
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-
-class DesktopDuplicationCaptureMethod(BitBltCaptureMethod):
- name = "Direct3D Desktop Duplication"
- short_description = "slower, bound to display"
- description = (
- "\nDuplicates the desktop using Direct3D. "
- + "\nIt can record OpenGL and Hardware Accelerated windows. "
- + "\nAbout 10-15x slower than BitBlt. Not affected by window size. "
- + "\nOverlapping windows will show up and can't record across displays. "
- + "\nThis option may not be available for hybrid GPU laptops, "
- + "\nsee D3DDD-Note-Laptops.md for a solution. "
- + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method "
- )
-
- def __init__(self, autosplit: AutoSplit | None):
- super().__init__(autosplit)
- # Must not set statically as some laptops will throw an error
- self.desktop_duplication = d3dshot.create(capture_output="numpy")
-
- @override
- def get_frame(self, autosplit: AutoSplit):
- selection = autosplit.settings_dict["capture_region"]
- hwnd = autosplit.hwnd
- hmonitor = ctypes.windll.user32.MonitorFromWindow(hwnd, win32con.MONITOR_DEFAULTTONEAREST)
- if not hmonitor or not self.check_selected_region_exists(autosplit):
- return None, False
-
- left_bounds, top_bounds, *_ = get_window_bounds(hwnd)
- self.desktop_duplication.display = next(
- display for display
- in self.desktop_duplication.displays
- if display.hmonitor == hmonitor
- )
- offset_x, offset_y, *_ = win32gui.GetWindowRect(hwnd)
- offset_x -= self.desktop_duplication.display.position["left"]
- offset_y -= self.desktop_duplication.display.position["top"]
- left = selection["x"] + offset_x + left_bounds
- top = selection["y"] + offset_y + top_bounds
- right = selection["width"] + left
- bottom = selection["height"] + top
- screenshot = cast(
- Union[np.ndarray[int, np.dtype[np.generic]], None],
- self.desktop_duplication.screenshot((left, top, right, bottom)),
- )
- if screenshot is None:
- return None, False
- return cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGRA), False
+import ctypes
+from typing import TYPE_CHECKING, cast
+
+import cv2
+import d3dshot
+import numpy as np
+import win32con
+from typing_extensions import override
+from win32 import win32gui
+
+from capture_method.BitBltCaptureMethod import BitBltCaptureMethod
+from utils import GITHUB_REPOSITORY, get_window_bounds
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+
+class DesktopDuplicationCaptureMethod(BitBltCaptureMethod):
+ name = "Direct3D Desktop Duplication"
+ short_description = "slower, bound to display"
+ description = (
+ "\nDuplicates the desktop using Direct3D. "
+ + "\nIt can record OpenGL and Hardware Accelerated windows. "
+ + "\nAbout 10-15x slower than BitBlt. Not affected by window size. "
+ + "\nOverlapping windows will show up and can't record across displays. "
+ + "\nThis option may not be available for hybrid GPU laptops, "
+ + "\nsee D3DDD-Note-Laptops.md for a solution. "
+ + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method "
+ )
+
+ def __init__(self, autosplit: "AutoSplit | None"):
+ super().__init__(autosplit)
+ # Must not set statically as some laptops will throw an error
+ self.desktop_duplication = d3dshot.create(capture_output="numpy")
+
+ @override
+ def get_frame(self, autosplit: "AutoSplit"):
+ selection = autosplit.settings_dict["capture_region"]
+ hwnd = autosplit.hwnd
+ hmonitor = ctypes.windll.user32.MonitorFromWindow(hwnd, win32con.MONITOR_DEFAULTTONEAREST)
+ if not hmonitor or not self.check_selected_region_exists(autosplit):
+ return None, False
+
+ left_bounds, top_bounds, *_ = get_window_bounds(hwnd)
+ self.desktop_duplication.display = next(
+ display for display
+ in self.desktop_duplication.displays
+ if display.hmonitor == hmonitor
+ )
+ offset_x, offset_y, *_ = win32gui.GetWindowRect(hwnd)
+ offset_x -= self.desktop_duplication.display.position["left"]
+ offset_y -= self.desktop_duplication.display.position["top"]
+ left = selection["x"] + offset_x + left_bounds
+ top = selection["y"] + offset_y + top_bounds
+ right = selection["width"] + left
+ bottom = selection["height"] + top
+ screenshot = cast(
+ np.ndarray[int, np.dtype[np.generic]] | None,
+ self.desktop_duplication.screenshot((left, top, right, bottom)),
+ )
+ if screenshot is None:
+ return None, False
+ return cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGRA), False
diff --git a/src/capture_method/ForceFullContentRenderingCaptureMethod.py b/src/capture_method/ForceFullContentRenderingCaptureMethod.py
index 6bbcd70e..ebc4cc40 100644
--- a/src/capture_method/ForceFullContentRenderingCaptureMethod.py
+++ b/src/capture_method/ForceFullContentRenderingCaptureMethod.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from capture_method.BitBltCaptureMethod import BitBltCaptureMethod
diff --git a/src/capture_method/VideoCaptureDeviceCaptureMethod.py b/src/capture_method/VideoCaptureDeviceCaptureMethod.py
index cbf517d1..d7fa9a7d 100644
--- a/src/capture_method/VideoCaptureDeviceCaptureMethod.py
+++ b/src/capture_method/VideoCaptureDeviceCaptureMethod.py
@@ -1,146 +1,144 @@
-from __future__ import annotations
-
-from threading import Event, Thread
-from typing import TYPE_CHECKING
-
-import cv2
-import cv2.Error
-import numpy as np
-from cv2.typing import MatLike
-from pygrabber.dshow_graph import FilterGraph
-from typing_extensions import override
-
-from capture_method.CaptureMethodBase import CaptureMethodBase
-from error_messages import CREATE_NEW_ISSUE_MESSAGE, exception_traceback
-from utils import ImageShape, is_valid_image
-
-if TYPE_CHECKING:
-
- from AutoSplit import AutoSplit
-
-OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128]
-
-
-def is_blank(image: MatLike):
- # Running np.all on the entire array or looping manually through the
- # entire array is extremely slow when we can't stop early.
- # Instead we check for a few key pixels, in this case, corners
- return np.all(
- image[
- ::image.shape[ImageShape.Y] - 1,
- ::image.shape[ImageShape.X] - 1,
- ] == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL,
- )
-
-
-class VideoCaptureDeviceCaptureMethod(CaptureMethodBase):
- name = "Video Capture Device"
- short_description = "see below"
- description = (
- "\nUses a Video Capture Device, like a webcam, virtual cam, or capture card. "
- + "\nYou can select one below. "
- )
-
- capture_device: cv2.VideoCapture
- capture_thread: Thread | None = None
- stop_thread: Event
- last_captured_frame: MatLike | None = None
- is_old_image = False
-
- def __read_loop(self, autosplit: AutoSplit):
- try:
- while not self.stop_thread.is_set():
- try:
- result, image = self.capture_device.read()
- except cv2.error as cv2_error:
- if not (
- cv2_error.code == cv2.Error.STS_ERROR
- and (
- # Likely means the camera is occupied OR the camera index is out of range (like -1)
- cv2_error.msg.endswith("in function 'cv::VideoCapture::grab'\n")
- # Some capture cards we cannot use directly
- # https://github.com/opencv/opencv/issues/23539
- or cv2_error.msg.endswith("in function 'cv::VideoCapture::retrieve'\n")
- )
- ):
- raise
- result = False
- image = None
- if not result:
- image = None
-
- # Blank frame. Reuse the previous one.
- if image is not None and is_blank(image):
- continue
-
- self.last_captured_frame = image
- self.is_old_image = False
- except Exception as exception: # noqa: BLE001 # We really want to catch everything here
- error = exception
- self.capture_device.release()
- autosplit.show_error_signal.emit(
- lambda: exception_traceback(
- error,
- "AutoSplit encountered an unhandled exception while "
- + "trying to grab a frame and has stopped capture. "
- + CREATE_NEW_ISSUE_MESSAGE,
- ),
- )
-
- def __init__(self, autosplit: AutoSplit):
- super().__init__(autosplit)
- self.capture_device = cv2.VideoCapture(autosplit.settings_dict["capture_device_id"])
- self.capture_device.setExceptionMode(True)
- self.stop_thread = Event()
-
- # The video capture device isn't accessible, don't bother with it.
- if not self.capture_device.isOpened():
- return
-
- filter_graph = FilterGraph()
- filter_graph.add_video_input_device(autosplit.settings_dict["capture_device_id"])
- width, height = filter_graph.get_input_device().get_current_format()
- filter_graph.remove_filters()
-
- # Ensure we're using the right camera size. And not OpenCV's default 640x480
- try:
- self.capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width)
- self.capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
- except cv2.error:
- # Some cameras don't allow changing the resolution
- pass
- self.capture_thread = Thread(target=lambda: self.__read_loop(autosplit))
- self.capture_thread.start()
-
- @override
- def close(self, autosplit: AutoSplit):
- self.stop_thread.set()
- if self.capture_thread:
- self.capture_thread.join()
- self.capture_thread = None
- self.capture_device.release()
-
- @override
- def get_frame(self, autosplit: AutoSplit):
- if not self.check_selected_region_exists(autosplit):
- return None, False
-
- image = self.last_captured_frame
- is_old_image = self.is_old_image
- self.is_old_image = True
- if not is_valid_image(image):
- return None, is_old_image
-
- selection = autosplit.settings_dict["capture_region"]
- # Ensure we can't go OOB of the image
- y = min(selection["y"], image.shape[ImageShape.Y] - 1)
- x = min(selection["x"], image.shape[ImageShape.X] - 1)
- image = image[
- y:y + selection["height"],
- x:x + selection["width"],
- ]
- return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image
-
- @override
- def check_selected_region_exists(self, autosplit: AutoSplit):
- return bool(self.capture_device.isOpened())
+from threading import Event, Thread
+from typing import TYPE_CHECKING
+
+import cv2
+import cv2.Error
+import numpy as np
+from cv2.typing import MatLike
+from pygrabber.dshow_graph import FilterGraph
+from typing_extensions import override
+
+from capture_method.CaptureMethodBase import CaptureMethodBase
+from error_messages import CREATE_NEW_ISSUE_MESSAGE, exception_traceback
+from utils import ImageShape, is_valid_image
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128]
+
+
+def is_blank(image: MatLike):
+ # Running np.all on the entire array or looping manually through the
+ # entire array is extremely slow when we can't stop early.
+ # Instead we check for a few key pixels, in this case, corners
+ return np.all(
+ image[
+ :: image.shape[ImageShape.Y] - 1,
+ :: image.shape[ImageShape.X] - 1,
+ ]
+ == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL,
+ )
+
+
+class VideoCaptureDeviceCaptureMethod(CaptureMethodBase):
+ name = "Video Capture Device"
+ short_description = "see below"
+ description = (
+ "\nUses a Video Capture Device, like a webcam, virtual cam, or capture card. "
+ + "\nYou can select one below. "
+ )
+
+ capture_device: cv2.VideoCapture
+ capture_thread: Thread | None = None
+ stop_thread: Event
+ last_captured_frame: MatLike | None = None
+ is_old_image = False
+
+ def __read_loop(self, autosplit: "AutoSplit"):
+ try:
+ while not self.stop_thread.is_set():
+ try:
+ result, image = self.capture_device.read()
+ except cv2.error as cv2_error:
+ if not (
+ cv2_error.code == cv2.Error.STS_ERROR
+ and (
+ # Likely means the camera is occupied OR the camera index is out of range (like -1)
+ cv2_error.msg.endswith("in function 'cv::VideoCapture::grab'\n")
+ # Some capture cards we cannot use directly
+ # https://github.com/opencv/opencv/issues/23539
+ or cv2_error.msg.endswith("in function 'cv::VideoCapture::retrieve'\n")
+ )
+ ):
+ raise
+ result = False
+ image = None
+ if not result:
+ image = None
+
+ # Blank frame. Reuse the previous one.
+ if image is not None and is_blank(image):
+ continue
+
+ self.last_captured_frame = image
+ self.is_old_image = False
+ except Exception as exception: # noqa: BLE001 # We really want to catch everything here
+ error = exception
+ self.capture_device.release()
+ autosplit.show_error_signal.emit(
+ lambda: exception_traceback(
+ error,
+ "AutoSplit encountered an unhandled exception while "
+ + "trying to grab a frame and has stopped capture. "
+ + CREATE_NEW_ISSUE_MESSAGE,
+ ),
+ )
+
+ def __init__(self, autosplit: "AutoSplit"):
+ super().__init__(autosplit)
+ self.capture_device = cv2.VideoCapture(autosplit.settings_dict["capture_device_id"])
+ self.capture_device.setExceptionMode(True)
+ self.stop_thread = Event()
+
+ # The video capture device isn't accessible, don't bother with it.
+ if not self.capture_device.isOpened():
+ return
+
+ filter_graph = FilterGraph()
+ filter_graph.add_video_input_device(autosplit.settings_dict["capture_device_id"])
+ width, height = filter_graph.get_input_device().get_current_format()
+ filter_graph.remove_filters()
+
+ # Ensure we're using the right camera size. And not OpenCV's default 640x480
+ try:
+ self.capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width)
+ self.capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
+ except cv2.error:
+ # Some cameras don't allow changing the resolution
+ pass
+ self.capture_thread = Thread(target=lambda: self.__read_loop(autosplit))
+ self.capture_thread.start()
+
+ @override
+ def close(self, autosplit: "AutoSplit"):
+ self.stop_thread.set()
+ if self.capture_thread:
+ self.capture_thread.join()
+ self.capture_thread = None
+ self.capture_device.release()
+
+ @override
+ def get_frame(self, autosplit: "AutoSplit"):
+ if not self.check_selected_region_exists(autosplit):
+ return None, False
+
+ image = self.last_captured_frame
+ is_old_image = self.is_old_image
+ self.is_old_image = True
+ if not is_valid_image(image):
+ return None, is_old_image
+
+ selection = autosplit.settings_dict["capture_region"]
+ # Ensure we can't go OOB of the image
+ y = min(selection["y"], image.shape[ImageShape.Y] - 1)
+ x = min(selection["x"], image.shape[ImageShape.X] - 1)
+ image = image[
+ y: y + selection["height"],
+ x: x + selection["width"],
+ ]
+ return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image
+
+ @override
+ def check_selected_region_exists(self, autosplit: "AutoSplit"):
+ return bool(self.capture_device.isOpened())
diff --git a/src/capture_method/WindowsGraphicsCaptureMethod.py b/src/capture_method/WindowsGraphicsCaptureMethod.py
index 8ba0bb61..f338582b 100644
--- a/src/capture_method/WindowsGraphicsCaptureMethod.py
+++ b/src/capture_method/WindowsGraphicsCaptureMethod.py
@@ -1,155 +1,153 @@
-from __future__ import annotations
-
-import asyncio
-from typing import TYPE_CHECKING, cast
-
-import numpy as np
-from cv2.typing import MatLike
-from typing_extensions import override
-from win32 import win32gui
-from winsdk.windows.graphics import SizeInt32
-from winsdk.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession
-from winsdk.windows.graphics.capture.interop import create_for_window
-from winsdk.windows.graphics.directx import DirectXPixelFormat
-from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap
-
-from capture_method.CaptureMethodBase import CaptureMethodBase
-from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd
-
-if TYPE_CHECKING:
-
- from AutoSplit import AutoSplit
-
-WGC_NO_BORDER_MIN_BUILD = 20348
-LEARNING_MODE_DEVICE_BUILD = 17763
-"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice"""
-
-
-class WindowsGraphicsCaptureMethod(CaptureMethodBase):
- name = "Windows Graphics Capture"
- short_description = "fast, most compatible, capped at 60fps"
- description = (
- f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. "
- + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}"
- + "\nrequire having at least one audio or video Capture Device connected and enabled."
- + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. "
- + "\nAdds a yellow border on Windows 10 (not on Windows 11)."
- + "\nCaps at around 60 FPS. "
- )
-
- size: SizeInt32
- frame_pool: Direct3D11CaptureFramePool | None = None
- session: GraphicsCaptureSession | None = None
- """This is stored to prevent session from being garbage collected"""
- last_captured_frame: MatLike | None = None
-
- def __init__(self, autosplit: AutoSplit):
- super().__init__(autosplit)
- if not is_valid_hwnd(autosplit.hwnd):
- return
-
- item = create_for_window(autosplit.hwnd)
- frame_pool = Direct3D11CaptureFramePool.create_free_threaded(
- get_direct3d_device(),
- DirectXPixelFormat.B8_G8_R8_A8_UINT_NORMALIZED,
- 1,
- item.size,
- )
- if not frame_pool:
- raise OSError("Unable to create a frame pool for a capture session.")
- session = frame_pool.create_capture_session(item)
- if not session:
- raise OSError("Unable to create a capture session.")
- session.is_cursor_capture_enabled = False
- if WINDOWS_BUILD_NUMBER >= WGC_NO_BORDER_MIN_BUILD:
- session.is_border_required = False
- session.start_capture()
-
- self.session = session
- self.size = item.size
- self.frame_pool = frame_pool
-
- @override
- def close(self, autosplit: AutoSplit):
- if self.frame_pool:
- self.frame_pool.close()
- self.frame_pool = None
- if self.session:
- try:
- self.session.close()
- except OSError:
- # OSError: The application called an interface that was marshalled for a different thread
- # This still seems to close the session and prevent the following hard crash in LiveSplit
- # "AutoSplit.exe " # noqa: E501
- pass
- self.session = None
-
- @override
- def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]:
- selection = autosplit.settings_dict["capture_region"]
- # We still need to check the hwnd because WGC will return a blank black image
- if not (
- self.check_selected_region_exists(autosplit)
- # Only needed for the type-checker
- and self.frame_pool
- ):
- return None, False
-
- try:
- frame = self.frame_pool.try_get_next_frame()
- # Frame pool is closed
- except OSError:
- return None, False
-
- async def coroutine():
- # We were too fast and the next frame wasn't ready yet
- if not frame:
- return None
- return await (SoftwareBitmap.create_copy_from_surface_async(frame.surface) or asyncio.sleep(0, None))
- try:
- software_bitmap = asyncio.run(coroutine())
- except SystemError as exception:
- # HACK: can happen when closing the GraphicsCapturePicker
- if str(exception).endswith("returned a result with an error set"):
- return self.last_captured_frame, True
- raise
-
- if not software_bitmap:
- # HACK: Can happen when starting the region selector
- return self.last_captured_frame, True
- # raise ValueError("Unable to convert Direct3D11CaptureFrame to SoftwareBitmap.")
- bitmap_buffer = software_bitmap.lock_buffer(BitmapBufferAccessMode.READ_WRITE)
- if not bitmap_buffer:
- raise ValueError("Unable to obtain the BitmapBuffer from SoftwareBitmap.")
- reference = bitmap_buffer.create_reference()
- image = np.frombuffer(cast(bytes, reference), dtype=np.uint8)
- image.shape = (self.size.height, self.size.width, BGRA_CHANNEL_COUNT)
- image = image[
- selection["y"]:selection["y"] + selection["height"],
- selection["x"]:selection["x"] + selection["width"],
- ]
- self.last_captured_frame = image
- return image, False
-
- @override
- def recover_window(self, captured_window_title: str, autosplit: AutoSplit):
- hwnd = win32gui.FindWindow(None, captured_window_title)
- if not is_valid_hwnd(hwnd):
- return False
- autosplit.hwnd = hwnd
- try:
- self.reinitialize(autosplit)
- # Unrecordable hwnd found as the game is crashing
- except OSError as exception:
- if str(exception).endswith("The parameter is incorrect"):
- return False
- raise
- return self.check_selected_region_exists(autosplit)
-
- @override
- def check_selected_region_exists(self, autosplit: AutoSplit):
- return bool(
- is_valid_hwnd(autosplit.hwnd)
- and self.frame_pool
- and self.session,
- )
+import asyncio
+from typing import TYPE_CHECKING, cast
+
+import numpy as np
+from cv2.typing import MatLike
+from typing_extensions import override
+from win32 import win32gui
+from winsdk.windows.graphics import SizeInt32
+from winsdk.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession
+from winsdk.windows.graphics.capture.interop import create_for_window
+from winsdk.windows.graphics.directx import DirectXPixelFormat
+from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap
+
+from capture_method.CaptureMethodBase import CaptureMethodBase
+from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+WGC_NO_BORDER_MIN_BUILD = 20348
+LEARNING_MODE_DEVICE_BUILD = 17763
+"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice"""
+
+
+class WindowsGraphicsCaptureMethod(CaptureMethodBase):
+ name = "Windows Graphics Capture"
+ short_description = "fast, most compatible, capped at 60fps"
+ description = (
+ f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. "
+ + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}"
+ + "\nrequire having at least one audio or video Capture Device connected and enabled."
+ + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. "
+ + "\nAdds a yellow border on Windows 10 (not on Windows 11)."
+ + "\nCaps at around 60 FPS. "
+ )
+
+ size: SizeInt32
+ frame_pool: Direct3D11CaptureFramePool | None = None
+ session: GraphicsCaptureSession | None = None
+ """This is stored to prevent session from being garbage collected"""
+ last_captured_frame: MatLike | None = None
+
+ def __init__(self, autosplit: "AutoSplit"):
+ super().__init__(autosplit)
+ if not is_valid_hwnd(autosplit.hwnd):
+ return
+
+ item = create_for_window(autosplit.hwnd)
+ frame_pool = Direct3D11CaptureFramePool.create_free_threaded(
+ get_direct3d_device(),
+ DirectXPixelFormat.B8_G8_R8_A8_UINT_NORMALIZED,
+ 1,
+ item.size,
+ )
+ if not frame_pool:
+ raise OSError("Unable to create a frame pool for a capture session.")
+ session = frame_pool.create_capture_session(item)
+ if not session:
+ raise OSError("Unable to create a capture session.")
+ session.is_cursor_capture_enabled = False
+ if WINDOWS_BUILD_NUMBER >= WGC_NO_BORDER_MIN_BUILD:
+ session.is_border_required = False
+ session.start_capture()
+
+ self.session = session
+ self.size = item.size
+ self.frame_pool = frame_pool
+
+ @override
+ def close(self, autosplit: "AutoSplit"):
+ if self.frame_pool:
+ self.frame_pool.close()
+ self.frame_pool = None
+ if self.session:
+ try:
+ self.session.close()
+ except OSError:
+ # OSError: The application called an interface that was marshalled for a different thread
+ # This still seems to close the session and prevent the following hard crash in LiveSplit
+ # "AutoSplit.exe " # noqa: E501
+ pass
+ self.session = None
+
+ @override
+ def get_frame(self, autosplit: "AutoSplit") -> tuple[MatLike | None, bool]:
+ selection = autosplit.settings_dict["capture_region"]
+ # We still need to check the hwnd because WGC will return a blank black image
+ if not (
+ self.check_selected_region_exists(autosplit)
+ # Only needed for the type-checker
+ and self.frame_pool
+ ):
+ return None, False
+
+ try:
+ frame = self.frame_pool.try_get_next_frame()
+ # Frame pool is closed
+ except OSError:
+ return None, False
+
+ async def coroutine():
+ # We were too fast and the next frame wasn't ready yet
+ if not frame:
+ return None
+ return await (SoftwareBitmap.create_copy_from_surface_async(frame.surface) or asyncio.sleep(0, None))
+
+ try:
+ software_bitmap = asyncio.run(coroutine())
+ except SystemError as exception:
+ # HACK: can happen when closing the GraphicsCapturePicker
+ if str(exception).endswith("returned a result with an error set"):
+ return self.last_captured_frame, True
+ raise
+
+ if not software_bitmap:
+ # HACK: Can happen when starting the region selector
+ return self.last_captured_frame, True
+ # raise ValueError("Unable to convert Direct3D11CaptureFrame to SoftwareBitmap.")
+ bitmap_buffer = software_bitmap.lock_buffer(BitmapBufferAccessMode.READ_WRITE)
+ if not bitmap_buffer:
+ raise ValueError("Unable to obtain the BitmapBuffer from SoftwareBitmap.")
+ reference = bitmap_buffer.create_reference()
+ image = np.frombuffer(cast(bytes, reference), dtype=np.uint8)
+ image.shape = (self.size.height, self.size.width, BGRA_CHANNEL_COUNT)
+ image = image[
+ selection["y"]: selection["y"] + selection["height"],
+ selection["x"]: selection["x"] + selection["width"],
+ ]
+ self.last_captured_frame = image
+ return image, False
+
+ @override
+ def recover_window(self, captured_window_title: str, autosplit: "AutoSplit"):
+ hwnd = win32gui.FindWindow(None, captured_window_title)
+ if not is_valid_hwnd(hwnd):
+ return False
+ autosplit.hwnd = hwnd
+ try:
+ self.reinitialize(autosplit)
+ # Unrecordable hwnd found as the game is crashing
+ except OSError as exception:
+ if str(exception).endswith("The parameter is incorrect"):
+ return False
+ raise
+ return self.check_selected_region_exists(autosplit)
+
+ @override
+ def check_selected_region_exists(self, autosplit: "AutoSplit"):
+ return bool(
+ is_valid_hwnd(autosplit.hwnd)
+ and self.frame_pool
+ and self.session,
+ )
diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py
index 466cdea5..b03b6582 100644
--- a/src/capture_method/__init__.py
+++ b/src/capture_method/__init__.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import asyncio
from collections import OrderedDict
from dataclasses import dataclass
@@ -62,11 +60,9 @@ def __eq__(self, other: object):
def __hash__(self):
return self.value.__hash__()
- # https://github.com/python/typeshed/issues/10428
@override
- def _generate_next_value_( # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
- name: str | CaptureMethodEnum, *_, # noqa: N805
- ):
+ @staticmethod
+ def _generate_next_value_(name: "str | CaptureMethodEnum", *_):
return name
NONE = ""
@@ -127,6 +123,7 @@ def get(self, key: CaptureMethodEnum, __default: object = None):
CAPTURE_METHODS[CaptureMethodEnum.BITBLT] = BitBltCaptureMethod
try: # Test for laptop cross-GPU Desktop Duplication issue
import d3dshot
+
d3dshot.create(capture_output="numpy")
except (ModuleNotFoundError, COMError):
pass
@@ -136,7 +133,7 @@ def get(self, key: CaptureMethodEnum, __default: object = None):
CAPTURE_METHODS[CaptureMethodEnum.VIDEO_CAPTURE_DEVICE] = VideoCaptureDeviceCaptureMethod
-def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: AutoSplit):
+def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: "AutoSplit"):
autosplit.capture_method.close(autosplit)
autosplit.capture_method = CAPTURE_METHODS.get(selected_capture_method)(autosplit)
if selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
@@ -156,11 +153,6 @@ class CameraInfo:
resolution: tuple[int, int]
-def get_input_devices():
- """https://github.com/andreaschiavinato/python_grabber/pull/24 ."""
- return cast(list[str], FilterGraph().get_input_devices())
-
-
def get_input_device_resolution(index: int):
filter_graph = FilterGraph()
try:
@@ -176,7 +168,7 @@ def get_input_device_resolution(index: int):
async def get_all_video_capture_devices() -> list[CameraInfo]:
- named_video_inputs = get_input_devices()
+ named_video_inputs = FilterGraph().get_input_devices()
async def get_camera_info(index: int, device_name: str):
backend = ""
@@ -203,7 +195,8 @@ async def get_camera_info(index: int, device_name: str):
else None
return [
- camera_info for camera_info
+ camera_info
+ for camera_info
# Note: Return type required https://github.com/python/typeshed/issues/2652
in await asyncio.gather(*starmap(get_camera_info, enumerate(named_video_inputs)))
if camera_info is not None
diff --git a/src/compare.py b/src/compare.py
index 494381f4..fe021e34 100644
--- a/src/compare.py
+++ b/src/compare.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from math import sqrt
import cv2
@@ -102,6 +100,22 @@ def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None
return 1 - (hash_diff / 64.0)
+def get_comparison_method_by_index(comparison_method_index: int):
+ match comparison_method_index:
+ case 0:
+ return compare_l2_norm
+ case 1:
+ return compare_histograms
+ case 2:
+ return compare_phash
+ case _:
+ return __compare_dummy
+
+
+def __compare_dummy(*_: object):
+ return 0.0
+
+
def check_if_image_has_transparency(image: MatLike):
# Check if there's a transparency channel (4th channel) and if at least one pixel is transparent (< 255)
if image.shape[ImageShape.Channels] != BGRA_CHANNEL_COUNT:
@@ -114,6 +128,3 @@ def check_if_image_has_transparency(image: MatLike):
# (the image appears as all black in windows, so it's not obvious for the user what they did wrong)
return mean != MAXBYTE
-
-
-COMPARE_METHODS_BY_INDEX = {0: compare_l2_norm, 1: compare_histograms, 2: compare_phash}
diff --git a/src/error_messages.py b/src/error_messages.py
index 3a44f023..465d4dad 100644
--- a/src/error_messages.py
+++ b/src/error_messages.py
@@ -1,194 +1,195 @@
-"""Error messages."""
-from __future__ import annotations
-
-import os
-import signal
-import sys
-import traceback
-from types import TracebackType
-from typing import TYPE_CHECKING, NoReturn
-
-from PySide6 import QtCore, QtWidgets
-
-from utils import FROZEN, GITHUB_REPOSITORY
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-
-def __exit_program():
- # stop main thread (which is probably blocked reading input) via an interrupt signal
- os.kill(os.getpid(), signal.SIGINT)
- sys.exit(1)
-
-
-def set_text_message(message: str, details: str = "", kill_button: str = "", accept_button: str = ""):
- message_box = QtWidgets.QMessageBox()
- message_box.setWindowTitle("Error")
- message_box.setTextFormat(QtCore.Qt.TextFormat.RichText)
- message_box.setText(message)
- # Button order is important for default focus
- if accept_button:
- message_box.addButton(accept_button, QtWidgets.QMessageBox.ButtonRole.AcceptRole)
- if kill_button:
- force_quit_button = message_box.addButton(kill_button, QtWidgets.QMessageBox.ButtonRole.ResetRole)
- force_quit_button.clicked.connect(__exit_program)
- if details:
- message_box.setDetailedText(details)
- # Preopen the details
- for button in message_box.buttons():
- if message_box.buttonRole(button) == QtWidgets.QMessageBox.ButtonRole.ActionRole:
- button.click()
- break
- message_box.exec()
-
-
-def split_image_directory():
- set_text_message("No split image folder is selected.")
-
-
-def split_image_directory_not_found():
- set_text_message("The Split Image Folder does not exist.")
-
-
-def split_image_directory_empty():
- set_text_message("The Split Image Folder is empty.")
-
-
-def image_type(image: str):
- set_text_message(
- f"{image!r} is not a valid image file, does not exist, "
- + "or the full image file path contains a special character.",
- )
-
-
-def region():
- set_text_message(
- "No region is selected or the Capture Region window is not open. "
- + "Select a region or load settings while the Capture Region window is open.",
- )
-
-
-def split_hotkey():
- set_text_message("No split hotkey has been set.")
-
-
-def pause_hotkey():
- set_text_message(
- "Your split image folder contains an image filename with a pause flag {p}, but no pause hotkey is set.",
- )
-
-
-def image_validity(image: str = "File"):
- set_text_message(f"{image} not a valid image file")
-
-
-def alignment_not_matched():
- set_text_message("No area in capture region matched reference image. Alignment failed.")
-
-
-def no_keyword_image(keyword: str):
- set_text_message(f"Your split image folder does not contain an image with the keyword {keyword!r}.")
-
-
-def multiple_keyword_images(keyword: str):
- set_text_message(f"Only one image with the keyword {keyword!r} is allowed.")
-
-
-def reset_hotkey():
- set_text_message("Your split image folder contains a Reset Image, but no reset hotkey is set.")
-
-
-def old_version_settings_file():
- set_text_message(
- "Old version settings file detected. This version allows settings files in .toml format. Starting from v2.0.",
- )
-
-
-def invalid_settings():
- set_text_message("Invalid settings file.")
-
-
-def invalid_hotkey(hotkey_name: str):
- set_text_message(f"Invalid hotkey {hotkey_name!r}")
-
-
-def no_settings_file_on_open():
- set_text_message(
- "No settings file found. One can be loaded on open if placed in the same folder as the AutoSplit executable.",
- )
-
-
-def too_many_settings_files_on_open():
- set_text_message(
- "Too many settings files found. "
- + "Only one can be loaded on open if placed in the same folder as the AutoSplit executable.",
- )
-
-
-def check_for_updates():
- set_text_message("An error occurred while attempting to check for updates. Please check your connection.")
-
-
-def load_start_image():
- set_text_message(
- "Start Image found, but cannot be loaded unless Start hotkey is set. "
- + "Please set the hotkey, and then click the Reload Start Image button.",
- )
-
-
-def stdin_lost():
- set_text_message("stdin not supported or lost, external control like LiveSplit integration will not work.")
-
-
-def already_open():
- set_text_message(
- "An instance of AutoSplit is already running.
Are you sure you want to open a another one?",
- "",
- "Don't open",
- "Ignore",
- )
-
-
-def exception_traceback(exception: BaseException, message: str = ""):
- if not message:
- message = "AutoSplit encountered an unhandled exception and will try to recover, " + \
- f"however, there is no guarantee it will keep working properly. {CREATE_NEW_ISSUE_MESSAGE}"
- set_text_message(
- message,
- "\n".join(traceback.format_exception(None, exception, exception.__traceback__)),
- "Close AutoSplit",
- )
-
-
-CREATE_NEW_ISSUE_MESSAGE = (
- f"Please create a New Issue at "
- + f"github.com/{GITHUB_REPOSITORY}/issues, describe what happened, "
- + "and copy & paste the entire error message below"
-)
-
-
-def make_excepthook(autosplit: AutoSplit):
- def excepthook(exception_type: type[BaseException], exception: BaseException, _traceback: TracebackType | None):
- # Catch Keyboard Interrupts for a clean close
- if exception_type is KeyboardInterrupt or isinstance(exception, KeyboardInterrupt):
- sys.exit(0)
- # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture
- if (
- exception_type is SystemError
- and str(exception) == " returned a result with an error set"
- ):
- return
- # Whithin LiveSplit excepthook needs to use MainWindow's signals to show errors
- autosplit.show_error_signal.emit(lambda: exception_traceback(exception))
- return excepthook
-
-
-def handle_top_level_exceptions(exception: Exception) -> NoReturn:
- message = f"AutoSplit encountered an unrecoverable exception and will likely now close. {CREATE_NEW_ISSUE_MESSAGE}"
- # Print error to console if not running in executable
- if FROZEN:
- exception_traceback(exception, message)
- else:
- traceback.print_exception(type(exception), exception, exception.__traceback__)
- sys.exit(1)
+"""Error messages."""
+import os
+import signal
+import sys
+import traceback
+from types import TracebackType
+from typing import TYPE_CHECKING, NoReturn
+
+from PySide6 import QtCore, QtWidgets
+
+from utils import FROZEN, GITHUB_REPOSITORY
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+
+def __exit_program():
+ # stop main thread (which is probably blocked reading input) via an interrupt signal
+ os.kill(os.getpid(), signal.SIGINT)
+ sys.exit(1)
+
+
+def set_text_message(message: str, details: str = "", kill_button: str = "", accept_button: str = ""):
+ message_box = QtWidgets.QMessageBox()
+ message_box.setWindowTitle("Error")
+ message_box.setTextFormat(QtCore.Qt.TextFormat.RichText)
+ message_box.setText(message)
+ # Button order is important for default focus
+ if accept_button:
+ message_box.addButton(accept_button, QtWidgets.QMessageBox.ButtonRole.AcceptRole)
+ if kill_button:
+ force_quit_button = message_box.addButton(kill_button, QtWidgets.QMessageBox.ButtonRole.ResetRole)
+ force_quit_button.clicked.connect(__exit_program)
+ if details:
+ message_box.setDetailedText(details)
+ # Preopen the details
+ for button in message_box.buttons():
+ if message_box.buttonRole(button) == QtWidgets.QMessageBox.ButtonRole.ActionRole:
+ button.click()
+ break
+ message_box.exec()
+
+
+def split_image_directory():
+ set_text_message("No split image folder is selected.")
+
+
+def split_image_directory_not_found():
+ set_text_message("The Split Image Folder does not exist.")
+
+
+def split_image_directory_empty():
+ set_text_message("The Split Image Folder is empty.")
+
+
+def image_type(image: str):
+ set_text_message(
+ f"{image!r} is not a valid image file, does not exist, "
+ + "or the full image file path contains a special character.",
+ )
+
+
+def region():
+ set_text_message(
+ "No region is selected or the Capture Region window is not open. "
+ + "Select a region or load settings while the Capture Region window is open.",
+ )
+
+
+def split_hotkey():
+ set_text_message("No split hotkey has been set.")
+
+
+def pause_hotkey():
+ set_text_message(
+ "Your split image folder contains an image filename with a pause flag {p}, but no pause hotkey is set.",
+ )
+
+
+def image_validity(image: str = "File"):
+ set_text_message(f"{image} not a valid image file")
+
+
+def alignment_not_matched():
+ set_text_message("No area in capture region matched reference image. Alignment failed.")
+
+
+def no_keyword_image(keyword: str):
+ set_text_message(f"Your split image folder does not contain an image with the keyword {keyword!r}.")
+
+
+def multiple_keyword_images(keyword: str):
+ set_text_message(f"Only one image with the keyword {keyword!r} is allowed.")
+
+
+def reset_hotkey():
+ set_text_message("Your split image folder contains a Reset Image, but no reset hotkey is set.")
+
+
+def old_version_settings_file():
+ set_text_message(
+ "Old version settings file detected. This version allows settings files in .toml format. Starting from v2.0.",
+ )
+
+
+def invalid_settings():
+ set_text_message("Invalid settings file.")
+
+
+def invalid_hotkey(hotkey_name: str):
+ set_text_message(f"Invalid hotkey {hotkey_name!r}")
+
+
+def no_settings_file_on_open():
+ set_text_message(
+ "No settings file found. One can be loaded on open if placed in the same folder as the AutoSplit executable.",
+ )
+
+
+def too_many_settings_files_on_open():
+ set_text_message(
+ "Too many settings files found. "
+ + "Only one can be loaded on open if placed in the same folder as the AutoSplit executable.",
+ )
+
+
+def check_for_updates():
+ set_text_message("An error occurred while attempting to check for updates. Please check your connection.")
+
+
+def load_start_image():
+ set_text_message(
+ "Start Image found, but cannot be loaded unless Start hotkey is set. "
+ + "Please set the hotkey, and then click the Reload Start Image button.",
+ )
+
+
+def stdin_lost():
+ set_text_message("stdin not supported or lost, external control like LiveSplit integration will not work.")
+
+
+def already_open():
+ set_text_message(
+ "An instance of AutoSplit is already running.
Are you sure you want to open a another one?",
+ "",
+ "Don't open",
+ "Ignore",
+ )
+
+
+def exception_traceback(exception: BaseException, message: str = ""):
+ if not message:
+ message = (
+ "AutoSplit encountered an unhandled exception and will try to recover, "
+ + f"however, there is no guarantee it will keep working properly. {CREATE_NEW_ISSUE_MESSAGE}"
+ )
+ set_text_message(
+ message,
+ "\n".join(traceback.format_exception(None, exception, exception.__traceback__)),
+ "Close AutoSplit",
+ )
+
+
+CREATE_NEW_ISSUE_MESSAGE = (
+ f"Please create a New Issue at "
+ + f"github.com/{GITHUB_REPOSITORY}/issues, describe what happened, "
+ + "and copy & paste the entire error message below"
+)
+
+
+def make_excepthook(autosplit: "AutoSplit"):
+ def excepthook(exception_type: type[BaseException], exception: BaseException, _traceback: TracebackType | None):
+ # Catch Keyboard Interrupts for a clean close
+ if exception_type is KeyboardInterrupt or isinstance(exception, KeyboardInterrupt):
+ sys.exit(0)
+ # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture
+ if (
+ exception_type is SystemError
+ and str(exception) == " returned a result with an error set"
+ ):
+ return
+        # Within LiveSplit excepthook needs to use MainWindow's signals to show errors
+ autosplit.show_error_signal.emit(lambda: exception_traceback(exception))
+
+ return excepthook
+
+
+def handle_top_level_exceptions(exception: Exception) -> NoReturn:
+ message = f"AutoSplit encountered an unrecoverable exception and will likely now close. {CREATE_NEW_ISSUE_MESSAGE}"
+ # Print error to console if not running in executable
+ if FROZEN:
+ exception_traceback(exception, message)
+ else:
+ traceback.print_exception(type(exception), exception, exception.__traceback__)
+ sys.exit(1)
diff --git a/src/hotkeys.py b/src/hotkeys.py
index dfa79051..1109cedb 100644
--- a/src/hotkeys.py
+++ b/src/hotkeys.py
@@ -1,310 +1,309 @@
-from __future__ import annotations
-
-from collections.abc import Callable
-from typing import TYPE_CHECKING, Literal, cast
-
-import keyboard
-import pyautogui
-from PySide6 import QtWidgets
-
-import error_messages
-from utils import fire_and_forget, is_digit
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-# While not usually recommended, we don't manipulate the mouse, and we don't want the extra delay
-pyautogui.FAILSAFE = False
-
-SET_HOTKEY_TEXT = "Set Hotkey"
-PRESS_A_KEY_TEXT = "Press a key..."
-
-Commands = Literal["split", "start", "pause", "reset", "skip", "undo"]
-Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"]
-HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"]
-
-
-def remove_all_hotkeys():
- keyboard.unhook_all()
-
-
-def before_setting_hotkey(autosplit: AutoSplit):
- """Do all of these after you click "Set Hotkey" but before you type the hotkey."""
- autosplit.start_auto_splitter_button.setEnabled(False)
- if autosplit.SettingsWidget:
- for hotkey in HOTKEYS:
- getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False)
-
-
-def after_setting_hotkey(autosplit: AutoSplit):
- """
- Do all of these things after you set a hotkey.
- A signal connects to this because changing GUI stuff is only possible in the main thread.
- """
- if not autosplit.is_running:
- autosplit.start_auto_splitter_button.setEnabled(True)
- if autosplit.SettingsWidget:
- for hotkey in HOTKEYS:
- getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(SET_HOTKEY_TEXT)
- getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True)
-
-
-def send_command(autosplit: AutoSplit, command: Commands):
- # Note: Rather than having the start image able to also reset the timer,
- # having the reset image check be active at all time would be a better, more organic solution,
- # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images.
- if autosplit.is_auto_controlled:
- if command == "start" and autosplit.settings_dict["start_also_resets"]:
- print("reset", flush=True)
- print(command, flush=True)
- elif command == "start":
- if autosplit.settings_dict["start_also_resets"]:
- _send_hotkey(autosplit.settings_dict["reset_hotkey"])
- _send_hotkey(autosplit.settings_dict["split_hotkey"])
- elif command == "split":
- _send_hotkey(autosplit.settings_dict["split_hotkey"])
- elif command == "pause":
- _send_hotkey(autosplit.settings_dict["pause_hotkey"])
- elif command == "reset":
- _send_hotkey(autosplit.settings_dict["reset_hotkey"])
- elif command == "skip":
- _send_hotkey(autosplit.settings_dict["skip_split_hotkey"])
- elif command == "undo":
- _send_hotkey(autosplit.settings_dict["undo_split_hotkey"])
-
- else:
- raise KeyError(f"{command!r} is not a valid command")
-
-
-def _unhook(hotkey_callback: Callable[[], None] | None):
- try:
- if hotkey_callback:
- keyboard.unhook_key(hotkey_callback)
- except (AttributeError, KeyError, ValueError):
- pass
-
-
-def _send_hotkey(hotkey_or_scan_code: int | str | None):
- """Supports sending the appropriate scan code for all the special cases."""
- if not hotkey_or_scan_code:
- return
-
- # Deal with regular inputs
- # If an int or does not contain the following strings
- if (
- isinstance(hotkey_or_scan_code, int)
- or not any(key in hotkey_or_scan_code for key in ("num ", "decimal", "+"))
- ):
- keyboard.send(hotkey_or_scan_code)
- return
-
- # FIXME: Localized keys won't work here
- # Deal with problematic keys. Even by sending specific scan code "keyboard" still sends the default (wrong) key
- # keyboard also has issues with capitalization modifier (shift+A)
- # keyboard.send(keyboard.key_to_scan_codes(key_or_scan_code)[1])
- pyautogui.hotkey(
- *[
- "+" if key == "plus" else key
- for key
- in hotkey_or_scan_code.replace(" ", "").split("+")
- ],
- )
-
-
-def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) -> bool:
- """
- NOTE: This is a workaround very specific to numpads.
- Windows reports different physical keys with the same scan code.
- For example, "Home", "Num Home" and "Num 7" are all `71`.
- See: https://github.com/boppreh/keyboard/issues/171#issuecomment-390437684 .
-
- Since we reuse the key string we set to send to LiveSplit, we can't use fake names like "num home".
- We're also trying to achieve the same hotkey behaviour as LiveSplit has.
- """
- # Prevent "(keypad)delete", "(keypad)./decimal" and "del" from triggering each other
- # as well as "." and "(keypad)./decimal"
- if keyboard_event.scan_code in {83, 52}:
- # TODO: "del" won't work with "(keypad)delete" if localized in non-english (ie: "suppr" in french)
- return expected_key == keyboard_event.name
- # Prevent "action keys" from triggering "keypad keys"
- if keyboard_event.name and is_digit(keyboard_event.name[-1]):
- # Prevent "regular numbers" and "keypad numbers" from activating each other
- return bool(
- keyboard_event.is_keypad
- if expected_key.startswith("num ")
- else not keyboard_event.is_keypad,
- )
-
- # Prevent "keypad action keys" from triggering "regular numbers" and "keypad numbers"
- # Still allow the same key that might be localized differently on keypad vs non-keypad
- return not is_digit(expected_key[-1])
-
-
-def _hotkey_action(keyboard_event: keyboard.KeyboardEvent, key_name: str, action: Callable[[], None]):
- """
- We're doing the check here instead of saving the key code because
- the non-keypad shared keys are localized while the keypad ones aren't.
- They also share scan codes on Windows.
- """
- if keyboard_event.event_type == keyboard.KEY_DOWN and __validate_keypad(key_name, keyboard_event):
- action()
-
-
-def __get_key_name(keyboard_event: keyboard.KeyboardEvent):
- """Ensures proper keypad name."""
- event_name = str(keyboard_event.name)
- # Normally this is done by keyboard.get_hotkey_name. But our code won't always get there.
- if event_name == "+":
- return "plus"
- return f"num {keyboard_event.name}" \
- if keyboard_event.is_keypad and is_digit(keyboard_event.name) \
- else event_name
-
-
-def __get_hotkey_name(names: list[str]):
- """
- Uses keyboard.get_hotkey_name but works with non-english modifiers and keypad
- See: https://github.com/boppreh/keyboard/issues/516 .
- """
- if not names:
- return ""
-
- if len(names) == 1:
- return names[0]
-
- def sorting_key(key: str):
- return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0])
- clean_names = sorted(keyboard.get_hotkey_name(names).split("+"), key=sorting_key)
- # Replace the last key in hotkey_name with what we actually got as a last key_name
- # This ensures we keep proper keypad names
- return "+".join(clean_names[:-1] + names[-1:])
-
-
-def __read_hotkey():
- """
- Blocks until a hotkey combination is read.
- Returns the hotkey_name and last KeyboardEvent.
- """
- names: list[str] = []
- while True:
- keyboard_event = keyboard.read_event(True)
- # LiveSplit supports modifier keys as the last key, so any keyup means end of hotkey
- if keyboard_event.event_type == keyboard.KEY_UP:
- # Unless keyup is also the very first event,
- # which can happen from a very fast press at the same time we start reading
- if not names:
- continue
- break
- key_name = __get_key_name(keyboard_event)
- # Ignore long presses
- if names and names[-1] == key_name:
- continue
- names.append(__get_key_name(keyboard_event))
- # Stop at the first non-modifier to prevent registering a hotkey with multiple regular keys
- if not keyboard.is_modifier(keyboard_event.scan_code):
- break
- return __get_hotkey_name(names)
-
-
-def __remove_key_already_set(autosplit: AutoSplit, key_name: str):
- for hotkey in HOTKEYS:
- settings_key = f"{hotkey}_hotkey"
- if autosplit.settings_dict.get(settings_key) == key_name:
- _unhook(getattr(autosplit, f"{hotkey}_hotkey"))
- autosplit.settings_dict[settings_key] = "" # pyright: ignore[reportGeneralTypeIssues]
- if autosplit.SettingsWidget:
- getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("")
-
-
-def __get_hotkey_action(autosplit: AutoSplit, hotkey: Hotkey):
- if hotkey == "split":
- return autosplit.start_auto_splitter
- if hotkey == "skip_split":
- return lambda: autosplit.skip_split(True)
- if hotkey == "undo_split":
- return lambda: autosplit.undo_split(True)
- if hotkey == "toggle_auto_reset_image":
- def toggle_auto_reset_image():
- new_value = not autosplit.settings_dict["enable_auto_reset"]
- autosplit.settings_dict["enable_auto_reset"] = new_value
- if autosplit.SettingsWidget:
- autosplit.SettingsWidget.enable_auto_reset_image_checkbox.setChecked(new_value)
- return toggle_auto_reset_image
- return getattr(autosplit, f"{hotkey}_signal").emit
-
-
-def is_valid_hotkey_name(hotkey_name: str):
- return any(
- key and not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0])
- for key
- in hotkey_name.split("+")
- )
-
-# TODO: using getattr/setattr is NOT a good way to go about this. It was only temporarily done to
-# reduce duplicated code. We should use a dictionary of hotkey class or something.
-
-
-def set_hotkey(autosplit: AutoSplit, hotkey: Hotkey, preselected_hotkey_name: str = ""):
- if autosplit.SettingsWidget:
- # Unfocus all fields
- cast(QtWidgets.QWidget, autosplit.SettingsWidget).setFocus()
- getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(PRESS_A_KEY_TEXT)
-
- # Disable some buttons
- before_setting_hotkey(autosplit)
-
- # New thread points to read_and_set_hotkey. this thread is needed or GUI will freeze
- # while the program waits for user input on the hotkey
- @fire_and_forget
- def read_and_set_hotkey():
- try:
- hotkey_name = preselected_hotkey_name or __read_hotkey()
-
- # Unset hotkey by pressing "Escape". This is the same behaviour as LiveSplit
- if hotkey_name == "esc":
- _unhook(getattr(autosplit, f"{hotkey}_hotkey"))
- autosplit.settings_dict[f"{hotkey}_hotkey"] = "" # pyright: ignore[reportGeneralTypeIssues]
- if autosplit.SettingsWidget:
- getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("")
- return
-
- if not is_valid_hotkey_name(hotkey_name):
- autosplit.show_error_signal.emit(lambda: error_messages.invalid_hotkey(hotkey_name))
- return
-
- # Try to remove the previously set hotkey if there is one
- _unhook(getattr(autosplit, f"{hotkey}_hotkey"))
-
- # Remove any hotkey using the same key combination
- __remove_key_already_set(autosplit, hotkey_name)
-
- action = __get_hotkey_action(autosplit, hotkey)
- setattr(
- autosplit,
- f"{hotkey}_hotkey",
- # keyboard.add_hotkey doesn't give the last keyboard event, so we can't __validate_keypad.
- # This means "ctrl + num 5" and "ctrl + 5" will both be registered.
- # For that reason, we still prefer keyboard.hook_key for single keys.
- # keyboard module allows you to hit multiple keys for a hotkey. they are joined together by +.
- keyboard.add_hotkey(hotkey_name, action)
- if "+" in hotkey_name
- # We need to inspect the event to know if it comes from numpad because of _canonial_names.
- # See: https://github.com/boppreh/keyboard/issues/161#issuecomment-386825737
- # The best way to achieve this is make our own hotkey handling on top of hook
- # See: https://github.com/boppreh/keyboard/issues/216#issuecomment-431999553
- else keyboard.hook_key(
- hotkey_name,
- lambda keyboard_event: _hotkey_action(keyboard_event, hotkey_name, action),
- ),
- )
-
- if autosplit.SettingsWidget:
- getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText(hotkey_name)
- autosplit.settings_dict[f"{hotkey}_hotkey"] = hotkey_name # pyright: ignore[reportGeneralTypeIssues]
- except Exception as exception: # noqa: BLE001 # We really want to catch everything here
- error = exception
- autosplit.show_error_signal.emit(lambda: error_messages.exception_traceback(error))
- finally:
- autosplit.after_setting_hotkey_signal.emit()
-
- read_and_set_hotkey()
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Literal, cast
+
+import keyboard
+import pyautogui
+from PySide6 import QtWidgets
+
+import error_messages
+from utils import fire_and_forget, is_digit
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+# While not usually recommended, we don't manipulate the mouse, and we don't want the extra delay
+pyautogui.FAILSAFE = False
+
+SET_HOTKEY_TEXT = "Set Hotkey"
+PRESS_A_KEY_TEXT = "Press a key..."
+
+Commands = Literal["split", "start", "pause", "reset", "skip", "undo"]
+Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"]
+HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"]
+
+
+def remove_all_hotkeys():
+ keyboard.unhook_all()
+
+
+def before_setting_hotkey(autosplit: "AutoSplit"):
+ """Do all of these after you click "Set Hotkey" but before you type the hotkey."""
+ autosplit.start_auto_splitter_button.setEnabled(False)
+ if autosplit.SettingsWidget:
+ for hotkey in HOTKEYS:
+ getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False)
+
+
+def after_setting_hotkey(autosplit: "AutoSplit"):
+ """
+ Do all of these things after you set a hotkey.
+ A signal connects to this because changing GUI stuff is only possible in the main thread.
+ """
+ if not autosplit.is_running:
+ autosplit.start_auto_splitter_button.setEnabled(True)
+ if autosplit.SettingsWidget:
+ for hotkey in HOTKEYS:
+ getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(SET_HOTKEY_TEXT)
+ getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True)
+
+
+def send_command(autosplit: "AutoSplit", command: Commands):
+ # Note: Rather than having the start image able to also reset the timer,
+ # having the reset image check be active at all time would be a better, more organic solution,
+ # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images.
+    match command:
+        case _ if autosplit.is_auto_controlled:
+            if command == "start" and autosplit.settings_dict["start_also_resets"]:
+                print("reset", flush=True)
+            print(command, flush=True)
+        case "start" if autosplit.settings_dict["start_also_resets"]:
+            _send_hotkey(autosplit.settings_dict["reset_hotkey"])
+            _send_hotkey(autosplit.settings_dict["split_hotkey"])
+        case "reset":
+            _send_hotkey(autosplit.settings_dict["reset_hotkey"])
+        case "start" | "split":
+            _send_hotkey(autosplit.settings_dict["split_hotkey"])
+        case "pause":
+            _send_hotkey(autosplit.settings_dict["pause_hotkey"])
+        case "skip":
+            _send_hotkey(autosplit.settings_dict["skip_split_hotkey"])
+        case "undo":
+            _send_hotkey(autosplit.settings_dict["undo_split_hotkey"])
+        case _:  # pyright: ignore[reportUnnecessaryComparison]
+            raise KeyError(f"{command!r} is not a valid command")
+
+
+def _unhook(hotkey_callback: Callable[[], None] | None):
+ try:
+ if hotkey_callback:
+ keyboard.unhook_key(hotkey_callback)
+ except (AttributeError, KeyError, ValueError):
+ pass
+
+
+def _send_hotkey(hotkey_or_scan_code: int | str | None):
+ """Supports sending the appropriate scan code for all the special cases."""
+ if not hotkey_or_scan_code:
+ return
+
+ # Deal with regular inputs
+ # If an int or does not contain the following strings
+ if (
+ isinstance(hotkey_or_scan_code, int)
+ or not any(key in hotkey_or_scan_code for key in ("num ", "decimal", "+"))
+ ):
+ keyboard.send(hotkey_or_scan_code)
+ return
+
+ # FIXME: Localized keys won't work here
+ # Deal with problematic keys. Even by sending specific scan code "keyboard" still sends the default (wrong) key
+ # keyboard also has issues with capitalization modifier (shift+A)
+ # keyboard.send(keyboard.key_to_scan_codes(key_or_scan_code)[1])
+ pyautogui.hotkey(
+ *[
+ "+" if key == "plus" else key
+ for key
+ in hotkey_or_scan_code.replace(" ", "").split("+")
+ ],
+ )
+
+
def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) -> bool:
    """
    NOTE: This is a workaround very specific to numpads.
    Windows reports different physical keys with the same scan code.
    For example, "Home", "Num Home" and "Num 7" are all `71`.
    See: https://github.com/boppreh/keyboard/issues/171#issuecomment-390437684 .

    Since we reuse the key string we set to send to LiveSplit, we can't use fake names like "num home".
    We're also trying to achieve the same hotkey behaviour as LiveSplit has.
    """
    # Prevent "(keypad)delete", "(keypad)./decimal" and "del" from triggering each other,
    # as well as "." and "(keypad)./decimal"
    if keyboard_event.scan_code in {83, 52}:
        # TODO: "del" won't work with "(keypad)delete" if localized in non-english (ie: "suppr" in french)
        return expected_key == keyboard_event.name

    event_name = keyboard_event.name
    if event_name and is_digit(event_name[-1]):
        # Prevent "regular numbers" and "keypad numbers" from activating each other
        if expected_key.startswith("num "):
            return bool(keyboard_event.is_keypad)
        return not keyboard_event.is_keypad

    # Prevent "keypad action keys" from triggering "regular numbers" and "keypad numbers".
    # Still allow the same key that might be localized differently on keypad vs non-keypad.
    return not is_digit(expected_key[-1])
+
+
def _hotkey_action(keyboard_event: keyboard.KeyboardEvent, key_name: str, action: Callable[[], None]):
    """
    We're doing the check here instead of saving the key code because
    the non-keypad shared keys are localized while the keypad ones aren't.
    They also share scan codes on Windows.
    """
    is_key_down = keyboard_event.event_type == keyboard.KEY_DOWN
    if is_key_down and __validate_keypad(key_name, keyboard_event):
        action()
+
+
def __get_key_name(keyboard_event: keyboard.KeyboardEvent):
    """Ensures proper keypad name."""
    event_name = str(keyboard_event.name)
    # Normally this is done by keyboard.get_hotkey_name. But our code won't always get there.
    if event_name == "+":
        return "plus"
    if keyboard_event.is_keypad and is_digit(keyboard_event.name):
        return f"num {keyboard_event.name}"
    return event_name
+
+
def __get_hotkey_name(names: list[str]):
    """
    Uses keyboard.get_hotkey_name but works with non-english modifiers and keypad
    See: https://github.com/boppreh/keyboard/issues/516 .
    """
    if not names:
        return ""
    if len(names) == 1:
        return names[0]

    def modifiers_first(key: str):
        # False (modifiers) sorts before True (regular keys)
        return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0])

    clean_names = sorted(keyboard.get_hotkey_name(names).split("+"), key=modifiers_first)
    # Replace the last key in hotkey_name with what we actually got as a last key_name.
    # This ensures we keep proper keypad names.
    return "+".join([*clean_names[:-1], names[-1]])
+
+
def __read_hotkey():
    """
    Blocks until a hotkey combination is read.

    Returns the hotkey name (keys joined by "+", modifiers first).
    """
    names: list[str] = []
    while True:
        keyboard_event = keyboard.read_event(True)
        # LiveSplit supports modifier keys as the last key, so any keyup means end of hotkey
        if keyboard_event.event_type == keyboard.KEY_UP:
            # Unless keyup is also the very first event,
            # which can happen from a very fast press at the same time we start reading
            if not names:
                continue
            break
        key_name = __get_key_name(keyboard_event)
        # Ignore long presses
        if names and names[-1] == key_name:
            continue
        # Reuse key_name instead of recomputing __get_key_name a second time
        names.append(key_name)
        # Stop at the first non-modifier to prevent registering a hotkey with multiple regular keys
        if not keyboard.is_modifier(keyboard_event.scan_code):
            break
    return __get_hotkey_name(names)
+
+
def __remove_key_already_set(autosplit: "AutoSplit", key_name: str):
    """Unhook and clear any hotkey currently bound to `key_name`."""
    for hotkey in HOTKEYS:
        settings_key = f"{hotkey}_hotkey"
        if autosplit.settings_dict.get(settings_key) != key_name:
            continue
        _unhook(getattr(autosplit, settings_key))
        autosplit.settings_dict[settings_key] = ""  # pyright: ignore[reportGeneralTypeIssues]
        if autosplit.SettingsWidget:
            getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("")
+
+
+def __get_hotkey_action(autosplit: "AutoSplit", hotkey: Hotkey):
+ if hotkey == "split":
+ return autosplit.start_auto_splitter
+ if hotkey == "skip_split":
+ return lambda: autosplit.skip_split(True)
+ if hotkey == "undo_split":
+ return lambda: autosplit.undo_split(True)
+ if hotkey == "toggle_auto_reset_image":
+
+ def toggle_auto_reset_image():
+ new_value = not autosplit.settings_dict["enable_auto_reset"]
+ autosplit.settings_dict["enable_auto_reset"] = new_value
+ if autosplit.SettingsWidget:
+ autosplit.SettingsWidget.enable_auto_reset_image_checkbox.setChecked(new_value)
+
+ return toggle_auto_reset_image
+ return getattr(autosplit, f"{hotkey}_signal").emit
+
+
def is_valid_hotkey_name(hotkey_name: str):
    """A hotkey name is valid when at least one of its keys is a non-modifier."""
    for key in hotkey_name.split("+"):
        if key and not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]):
            return True
    return False
+
+# TODO: using getattr/setattr is NOT a good way to go about this. It was only temporarily done to
+# reduce duplicated code. We should use a dictionary of hotkey class or something.
+
+
def set_hotkey(autosplit: "AutoSplit", hotkey: Hotkey, preselected_hotkey_name: str = ""):
    """
    Read a key combination from the user (or use `preselected_hotkey_name`) and bind it to `hotkey`.

    Registers the combination with the `keyboard` module, stores it in the settings dict,
    and mirrors it into the settings widget's text field. Pressing "Escape" unsets the hotkey.
    """
    if autosplit.SettingsWidget:
        # Unfocus all fields
        cast(QtWidgets.QWidget, autosplit.SettingsWidget).setFocus()
        getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(PRESS_A_KEY_TEXT)

    # Disable some buttons
    before_setting_hotkey(autosplit)

    # New thread points to read_and_set_hotkey. this thread is needed or GUI will freeze
    # while the program waits for user input on the hotkey
    @fire_and_forget
    def read_and_set_hotkey():
        try:
            hotkey_name = preselected_hotkey_name or __read_hotkey()

            # Unset hotkey by pressing "Escape". This is the same behaviour as LiveSplit
            if hotkey_name == "esc":
                _unhook(getattr(autosplit, f"{hotkey}_hotkey"))
                autosplit.settings_dict[f"{hotkey}_hotkey"] = ""  # pyright: ignore[reportGeneralTypeIssues]
                if autosplit.SettingsWidget:
                    getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("")
                return

            if not is_valid_hotkey_name(hotkey_name):
                autosplit.show_error_signal.emit(lambda: error_messages.invalid_hotkey(hotkey_name))
                return

            # Try to remove the previously set hotkey if there is one
            _unhook(getattr(autosplit, f"{hotkey}_hotkey"))

            # Remove any hotkey using the same key combination
            __remove_key_already_set(autosplit, hotkey_name)

            action = __get_hotkey_action(autosplit, hotkey)
            # The hook/remover handle is stored on the AutoSplit instance as "<hotkey>_hotkey"
            # so later _unhook calls can find it.
            setattr(
                autosplit,
                f"{hotkey}_hotkey",
                # keyboard.add_hotkey doesn't give the last keyboard event, so we can't __validate_keypad.
                # This means "ctrl + num 5" and "ctrl + 5" will both be registered.
                # For that reason, we still prefer keyboard.hook_key for single keys.
                # keyboard module allows you to hit multiple keys for a hotkey. they are joined together by +.
                keyboard.add_hotkey(hotkey_name, action)
                if "+" in hotkey_name
                # We need to inspect the event to know if it comes from numpad because of _canonial_names.
                # See: https://github.com/boppreh/keyboard/issues/161#issuecomment-386825737
                # The best way to achieve this is make our own hotkey handling on top of hook
                # See: https://github.com/boppreh/keyboard/issues/216#issuecomment-431999553
                else keyboard.hook_key(
                    hotkey_name,
                    lambda keyboard_event: _hotkey_action(keyboard_event, hotkey_name, action),
                ),
            )

            if autosplit.SettingsWidget:
                getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText(hotkey_name)
            autosplit.settings_dict[f"{hotkey}_hotkey"] = hotkey_name  # pyright: ignore[reportGeneralTypeIssues]
        except Exception as exception:  # noqa: BLE001 # We really want to catch everything here
            error = exception
            autosplit.show_error_signal.emit(lambda: error_messages.exception_traceback(error))
        finally:
            # Always re-enable the buttons disabled by before_setting_hotkey
            autosplit.after_setting_hotkey_signal.emit()

    read_and_set_hotkey()
diff --git a/src/menu_bar.py b/src/menu_bar.py
index 2ef85d30..45b79d76 100644
--- a/src/menu_bar.py
+++ b/src/menu_bar.py
@@ -1,401 +1,400 @@
-from __future__ import annotations
-
-import asyncio
-import webbrowser
-from typing import TYPE_CHECKING, Any, cast
-
-import requests
-from packaging.version import parse as version_parse
-from PySide6 import QtCore, QtWidgets
-from PySide6.QtCore import Qt
-from PySide6.QtGui import QBrush, QPalette
-from PySide6.QtWidgets import QFileDialog
-from requests.exceptions import RequestException
-from typing_extensions import override
-
-import error_messages
-import user_profile
-from capture_method import (
- CAPTURE_METHODS,
- CameraInfo,
- CaptureMethodEnum,
- change_capture_method,
- get_all_video_capture_devices,
-)
-from gen import about, design, settings as settings_ui, update_checker
-from hotkeys import HOTKEYS, Hotkey, set_hotkey
-from utils import AUTOSPLIT_VERSION, GITHUB_REPOSITORY, decimal, fire_and_forget
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-HALF_BRIGHTNESS = 128
-
-
-class __AboutWidget(QtWidgets.QWidget, about.Ui_AboutAutoSplitWidget): # noqa: N801 # Private class
- """About Window."""
-
- def __init__(self):
- super().__init__()
- self.setupUi(self)
- self.created_by_label.setOpenExternalLinks(True)
- self.donate_button_label.setOpenExternalLinks(True)
- self.version_label.setText(f"Version: {AUTOSPLIT_VERSION}")
- self.show()
-
-
-def open_about(autosplit: AutoSplit):
- if not autosplit.AboutWidget or cast(QtWidgets.QWidget, autosplit.AboutWidget).isHidden():
- autosplit.AboutWidget = __AboutWidget()
-
-
-class __UpdateCheckerWidget(QtWidgets.QWidget, update_checker.Ui_UpdateChecker): # noqa: N801 # Private class
- def __init__(self, latest_version: str, design_window: design.Ui_MainWindow, check_on_open: bool = False):
- super().__init__()
- self.setupUi(self)
- self.current_version_number_label.setText(AUTOSPLIT_VERSION)
- self.latest_version_number_label.setText(latest_version)
- self.left_button.clicked.connect(self.open_update)
- self.do_not_ask_again_checkbox.stateChanged.connect(self.do_not_ask_me_again_state_changed)
- self.design_window = design_window
- if version_parse(latest_version) > version_parse(AUTOSPLIT_VERSION):
- self.do_not_ask_again_checkbox.setVisible(check_on_open)
- self.left_button.setFocus()
- self.show()
- elif not check_on_open:
- self.update_status_label.setText("You are on the latest AutoSplit version.")
- self.go_to_download_label.setVisible(False)
- self.left_button.setVisible(False)
- self.right_button.setText("OK")
- self.do_not_ask_again_checkbox.setVisible(False)
- self.show()
-
- def open_update(self):
- webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}/releases/latest")
- self.close()
-
- def do_not_ask_me_again_state_changed(self):
- user_profile.set_check_for_updates_on_open(
- self.design_window,
- self.do_not_ask_again_checkbox.isChecked(),
- )
-
-
-def open_update_checker(autosplit: AutoSplit, latest_version: str, check_on_open: bool):
- if not autosplit.UpdateCheckerWidget or cast(QtWidgets.QWidget, autosplit.UpdateCheckerWidget).isHidden():
- autosplit.UpdateCheckerWidget = __UpdateCheckerWidget(latest_version, autosplit, check_on_open)
-
-
-def view_help():
- webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#tutorial")
-
-
-class __CheckForUpdatesThread(QtCore.QThread): # noqa: N801 # Private class
- def __init__(self, autosplit: AutoSplit, check_on_open: bool):
- super().__init__()
- self.autosplit = autosplit
- self.check_on_open = check_on_open
-
- @override
- def run(self):
- try:
- response = requests.get(f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", timeout=30)
- latest_version = str(response.json()["name"]).split("v")[1]
- self.autosplit.update_checker_widget_signal.emit(latest_version, self.check_on_open)
- except (RequestException, KeyError):
- if not self.check_on_open:
- self.autosplit.show_error_signal.emit(error_messages.check_for_updates)
-
-
-def about_qt():
- webbrowser.open("https://wiki.qt.io/About_Qt")
-
-
-def about_qt_for_python():
- webbrowser.open("https://wiki.qt.io/Qt_for_Python")
-
-
-def check_for_updates(autosplit: AutoSplit, check_on_open: bool = False):
- autosplit.CheckForUpdatesThread = __CheckForUpdatesThread(autosplit, check_on_open)
- autosplit.CheckForUpdatesThread.start()
-
-
-class __SettingsWidget(QtWidgets.QWidget, settings_ui.Ui_SettingsWidget): # noqa: N801 # Private class
- def __init__(self, autosplit: AutoSplit):
- super().__init__()
- self.__video_capture_devices: list[CameraInfo] = []
- """
- Used to temporarily store the existing cameras,
- we don't want to call `get_all_video_capture_devices` agains and possibly have a different result
- """
-
- self.setupUi(self)
-
- # Fix Fusion Dark Theme's tabs content looking weird because it's using the button role
- window_color = self.palette().color(QPalette.ColorRole.Window)
- if window_color.red() < HALF_BRIGHTNESS:
- brush = QBrush(window_color)
- brush.setStyle(Qt.BrushStyle.SolidPattern)
- palette = QPalette()
- palette.setBrush(QPalette.ColorGroup.Active, QPalette.ColorRole.Button, brush)
- palette.setBrush(QPalette.ColorGroup.Inactive, QPalette.ColorRole.Button, brush)
- palette.setBrush(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Button, brush)
- self.settings_tabs.setPalette(palette)
-
- self.autosplit = autosplit
- self.__set_readme_link()
- # Don't autofocus any particular field
- self.setFocus()
-
-
-# region Build the Capture method combobox
- capture_method_values = CAPTURE_METHODS.values()
- self.__set_all_capture_devices()
-
- # TODO: Word-wrapping works, but there's lots of extra padding to the right. Raise issue upstream
- # list_view = QtWidgets.QListView()
- # list_view.setWordWrap(True)
- # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
- # list_view.setFixedWidth(self.capture_method_combobox.width())
- # self.capture_method_combobox.setView(list_view)
-
- self.capture_method_combobox.addItems([
- f"- {method.name} ({method.short_description})"
- for method in capture_method_values
- ])
- self.capture_method_combobox.setToolTip(
- "\n\n".join([
- f"{method.name} :\n{method.description}"
- for method in capture_method_values
- ]),
- )
-# endregion
-
- self.__setup_bindings()
-
- self.show()
-
- def __update_default_threshold(self, value: Any):
- self.__set_value("default_similarity_threshold", value)
- self.autosplit.table_current_image_threshold_label.setText(
- decimal(self.autosplit.split_image.get_similarity_threshold(self.autosplit))
- if self.autosplit.split_image
- else "-",
- )
- self.autosplit.table_reset_image_threshold_label.setText(
- decimal(self.autosplit.reset_image.get_similarity_threshold(self.autosplit))
- if self.autosplit.reset_image
- else "-",
- )
-
- def __set_value(self, key: str, value: Any):
- self.autosplit.settings_dict[key] = value
-
- def get_capture_device_index(self, capture_device_id: int):
- """Returns 0 if the capture_device_id is invalid."""
- try:
- return [device.device_id for device in self.__video_capture_devices].index(capture_device_id)
- except ValueError:
- return 0
-
- def __enable_capture_device_if_its_selected_method(
- self,
- selected_capture_method: str | CaptureMethodEnum | None = None,
- ):
- if selected_capture_method is None:
- selected_capture_method = self.autosplit.settings_dict["capture_method"]
- is_video_capture_device = selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE
- self.capture_device_combobox.setEnabled(is_video_capture_device)
- if is_video_capture_device:
- self.capture_device_combobox.setCurrentIndex(
- self.get_capture_device_index(self.autosplit.settings_dict["capture_device_id"]),
- )
- else:
- self.capture_device_combobox.setPlaceholderText('Select "Video Capture Device" above')
- self.capture_device_combobox.setCurrentIndex(-1)
-
- def __capture_method_changed(self):
- selected_capture_method = CAPTURE_METHODS.get_method_by_index(self.capture_method_combobox.currentIndex())
- self.__enable_capture_device_if_its_selected_method(selected_capture_method)
- change_capture_method(selected_capture_method, self.autosplit)
- return selected_capture_method
-
- def __capture_device_changed(self):
- device_index = self.capture_device_combobox.currentIndex()
- if device_index == -1:
- return
- capture_device = self.__video_capture_devices[device_index]
- self.autosplit.settings_dict["capture_device_name"] = capture_device.name
- self.autosplit.settings_dict["capture_device_id"] = capture_device.device_id
- if self.autosplit.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
- # Re-initializes the VideoCaptureDeviceCaptureMethod
- change_capture_method(CaptureMethodEnum.VIDEO_CAPTURE_DEVICE, self.autosplit)
-
- def __fps_limit_changed(self, value: int):
- value = self.fps_limit_spinbox.value()
- self.autosplit.settings_dict["fps_limit"] = value
- self.autosplit.timer_live_image.setInterval(int(1000 / value))
- self.autosplit.timer_live_image.setInterval(int(1000 / value))
-
- @fire_and_forget
- def __set_all_capture_devices(self):
- self.__video_capture_devices = asyncio.run(get_all_video_capture_devices())
- if len(self.__video_capture_devices) > 0:
- for i in range(self.capture_device_combobox.count()):
- self.capture_device_combobox.removeItem(i)
- self.capture_device_combobox.addItems([
- f"* {device.name}"
- + (f" [{device.backend}]" if device.backend else "")
- + (" (occupied)" if device.occupied else "")
- for device in self.__video_capture_devices
- ])
- self.__enable_capture_device_if_its_selected_method()
- else:
- self.capture_device_combobox.setPlaceholderText("No device found.")
-
- def __set_readme_link(self):
- self.custom_image_settings_info_label.setText(
- self.custom_image_settings_info_label
- .text()
- .format(GITHUB_REPOSITORY=GITHUB_REPOSITORY),
- )
- # HACK: This is a workaround because custom_image_settings_info_label
- # simply will not open links with a left click no matter what we tried.
- self.readme_link_button.clicked.connect(
- lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme"),
- )
- self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);")
-
- def __select_screenshot_directory(self):
- self.autosplit.settings_dict["screenshot_directory"] = QFileDialog.getExistingDirectory(
- self,
- "Select Screenshots Directory",
- self.autosplit.settings_dict["screenshot_directory"]
- or self.autosplit.settings_dict["split_image_directory"],
- )
- self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"])
-
- def __setup_bindings(self):
- # Hotkey initial values and bindings
- def hotkey_connect(hotkey: Hotkey):
- return lambda: set_hotkey(self.autosplit, hotkey)
- for hotkey in HOTKEYS:
- hotkey_input: QtWidgets.QLineEdit = getattr(self, f"{hotkey}_input")
- set_hotkey_hotkey_button: QtWidgets.QPushButton = getattr(self, f"set_{hotkey}_hotkey_button")
- hotkey_input.setText(self.autosplit.settings_dict.get(f"{hotkey}_hotkey", ""))
-
- set_hotkey_hotkey_button.clicked.connect(hotkey_connect(hotkey))
- # Make it very clear that hotkeys are not used when auto-controlled
- if self.autosplit.is_auto_controlled and hotkey != "toggle_auto_reset_image":
- set_hotkey_hotkey_button.setEnabled(False)
- hotkey_input.setEnabled(False)
-
-# region Set initial values
- # Capture Settings
- self.fps_limit_spinbox.setValue(self.autosplit.settings_dict["fps_limit"])
- self.live_capture_region_checkbox.setChecked(self.autosplit.settings_dict["live_capture_region"])
- self.capture_method_combobox.setCurrentIndex(
- CAPTURE_METHODS.get_index(self.autosplit.settings_dict["capture_method"]),
- )
- # No self.capture_device_combobox.setCurrentIndex
- # It'll set itself asynchronously in self.__set_all_capture_devices()
- self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"])
- self.open_screenshot_checkbox.setChecked(self.autosplit.settings_dict["open_screenshot"])
-
- # Image Settings
- self.default_comparison_method_combobox.setCurrentIndex(
- self.autosplit.settings_dict["default_comparison_method"],
- )
- self.default_similarity_threshold_spinbox.setValue(self.autosplit.settings_dict["default_similarity_threshold"])
- self.default_delay_time_spinbox.setValue(self.autosplit.settings_dict["default_delay_time"])
- self.default_pause_time_spinbox.setValue(self.autosplit.settings_dict["default_pause_time"])
- self.loop_splits_checkbox.setChecked(self.autosplit.settings_dict["loop_splits"])
- self.start_also_resets_checkbox.setChecked(self.autosplit.settings_dict["start_also_resets"])
- self.enable_auto_reset_image_checkbox.setChecked(self.autosplit.settings_dict["enable_auto_reset"])
-# endregion
-# region Binding
- # Capture Settings
- self.fps_limit_spinbox.valueChanged.connect(self.__fps_limit_changed)
- self.live_capture_region_checkbox.stateChanged.connect(
- lambda: self.__set_value("live_capture_region", self.live_capture_region_checkbox.isChecked()),
- )
- self.capture_method_combobox.currentIndexChanged.connect(
- lambda: self.__set_value("capture_method", self.__capture_method_changed()),
- )
- self.capture_device_combobox.currentIndexChanged.connect(self.__capture_device_changed)
- self.screenshot_directory_browse_button.clicked.connect(self.__select_screenshot_directory)
- self.open_screenshot_checkbox.stateChanged.connect(
- lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()),
- )
-
- # Image Settings
- self.default_comparison_method_combobox.currentIndexChanged.connect(
- lambda: self.__set_value(
- "default_comparison_method", self.default_comparison_method_combobox.currentIndex(),
- ),
- )
- self.default_similarity_threshold_spinbox.valueChanged.connect(
- lambda: self.__update_default_threshold(self.default_similarity_threshold_spinbox.value()),
- )
- self.default_delay_time_spinbox.valueChanged.connect(
- lambda: self.__set_value("default_delay_time", self.default_delay_time_spinbox.value()),
- )
- self.default_pause_time_spinbox.valueChanged.connect(
- lambda: self.__set_value("default_pause_time", self.default_pause_time_spinbox.value()),
- )
- self.loop_splits_checkbox.stateChanged.connect(
- lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()),
- )
- self.start_also_resets_checkbox.stateChanged.connect(
- lambda: self.__set_value("start_also_resets", self.start_also_resets_checkbox.isChecked()),
- )
- self.enable_auto_reset_image_checkbox.stateChanged.connect(
- lambda: self.__set_value("enable_auto_reset", self.enable_auto_reset_image_checkbox.isChecked()),
- )
-# endregion
-
-
-def open_settings(autosplit: AutoSplit):
- if not autosplit.SettingsWidget or cast(QtWidgets.QWidget, autosplit.SettingsWidget).isHidden():
- autosplit.SettingsWidget = __SettingsWidget(autosplit)
-
-
-def get_default_settings_from_ui(autosplit: AutoSplit):
- temp_dialog = QtWidgets.QWidget()
- default_settings_dialog = settings_ui.Ui_SettingsWidget()
- default_settings_dialog.setupUi(temp_dialog)
- default_settings: user_profile.UserProfileDict = {
- "split_hotkey": default_settings_dialog.split_input.text(),
- "reset_hotkey": default_settings_dialog.reset_input.text(),
- "undo_split_hotkey": default_settings_dialog.undo_split_input.text(),
- "skip_split_hotkey": default_settings_dialog.skip_split_input.text(),
- "pause_hotkey": default_settings_dialog.pause_input.text(),
- "screenshot_hotkey": default_settings_dialog.screenshot_input.text(),
- "toggle_auto_reset_image_hotkey": default_settings_dialog.toggle_auto_reset_image_input.text(),
- "fps_limit": default_settings_dialog.fps_limit_spinbox.value(),
- "live_capture_region": default_settings_dialog.live_capture_region_checkbox.isChecked(),
- "capture_method": CAPTURE_METHODS.get_method_by_index(
- default_settings_dialog.capture_method_combobox.currentIndex(),
- ),
- "capture_device_id": default_settings_dialog.capture_device_combobox.currentIndex(),
- "capture_device_name": "",
- "default_comparison_method": default_settings_dialog.default_comparison_method_combobox.currentIndex(),
- "default_similarity_threshold": default_settings_dialog.default_similarity_threshold_spinbox.value(),
- "default_delay_time": default_settings_dialog.default_delay_time_spinbox.value(),
- "default_pause_time": default_settings_dialog.default_pause_time_spinbox.value(),
- "loop_splits": default_settings_dialog.loop_splits_checkbox.isChecked(),
- "start_also_resets": default_settings_dialog.start_also_resets_checkbox.isChecked(),
- "enable_auto_reset": default_settings_dialog.enable_auto_reset_image_checkbox.isChecked(),
- "split_image_directory": autosplit.split_image_folder_input.text(),
- "screenshot_directory": default_settings_dialog.screenshot_directory_input.text(),
- "open_screenshot": default_settings_dialog.open_screenshot_checkbox.isChecked(),
- "captured_window_title": "",
- "capture_region": {
- "x": autosplit.x_spinbox.value(),
- "y": autosplit.y_spinbox.value(),
- "width": autosplit.width_spinbox.value(),
- "height": autosplit.height_spinbox.value(),
- },
- }
- del temp_dialog
- return default_settings
+import asyncio
+import webbrowser
+from typing import TYPE_CHECKING, Any, cast
+
+import requests
+from packaging.version import parse as version_parse
+from PySide6 import QtCore, QtWidgets
+from PySide6.QtCore import Qt
+from PySide6.QtGui import QBrush, QPalette
+from PySide6.QtWidgets import QFileDialog
+from requests.exceptions import RequestException
+from typing_extensions import override
+
+import error_messages
+import user_profile
+from capture_method import (
+ CAPTURE_METHODS,
+ CameraInfo,
+ CaptureMethodEnum,
+ change_capture_method,
+ get_all_video_capture_devices,
+)
+from gen import about, design, settings as settings_ui, update_checker
+from hotkeys import HOTKEYS, Hotkey, set_hotkey
+from utils import AUTOSPLIT_VERSION, GITHUB_REPOSITORY, decimal, fire_and_forget
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+HALF_BRIGHTNESS = 128
+
+
class __AboutWidget(QtWidgets.QWidget, about.Ui_AboutAutoSplitWidget):  # noqa: N801 # Private class
    """About Window."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Let the credit and donation labels open their embedded URLs in the browser
        self.created_by_label.setOpenExternalLinks(True)
        self.donate_button_label.setOpenExternalLinks(True)
        self.version_label.setText(f"Version: {AUTOSPLIT_VERSION}")
        # Display immediately upon construction
        self.show()
+
+
def open_about(autosplit: "AutoSplit"):
    """Show the About window, creating a fresh one unless it is already visible."""
    about_widget = autosplit.AboutWidget
    if not about_widget or cast(QtWidgets.QWidget, about_widget).isHidden():
        autosplit.AboutWidget = __AboutWidget()
+
+
class __UpdateCheckerWidget(QtWidgets.QWidget, update_checker.Ui_UpdateChecker):  # noqa: N801 # Private class
    """Dialog comparing the running AutoSplit version against the latest GitHub release."""

    def __init__(self, latest_version: str, design_window: design.Ui_MainWindow, check_on_open: bool = False):
        super().__init__()
        self.setupUi(self)
        self.current_version_number_label.setText(AUTOSPLIT_VERSION)
        self.latest_version_number_label.setText(latest_version)
        self.left_button.clicked.connect(self.open_update)
        self.do_not_ask_again_checkbox.stateChanged.connect(self.do_not_ask_me_again_state_changed)
        self.design_window = design_window
        if version_parse(latest_version) > version_parse(AUTOSPLIT_VERSION):
            # A newer release exists. "Do not ask again" is only offered for
            # the automatic check performed on startup.
            self.do_not_ask_again_checkbox.setVisible(check_on_open)
            self.left_button.setFocus()
            self.show()
        elif not check_on_open:
            # Already up to date: only show feedback when the user asked explicitly
            self.update_status_label.setText("You are on the latest AutoSplit version.")
            self.go_to_download_label.setVisible(False)
            self.left_button.setVisible(False)
            self.right_button.setText("OK")
            self.do_not_ask_again_checkbox.setVisible(False)
            self.show()
        # else: automatic check found no update -> stay silent (widget never shown)

    def open_update(self):
        """Open the latest release page in the default browser and close this dialog."""
        webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}/releases/latest")
        self.close()

    def do_not_ask_me_again_state_changed(self):
        # Persist the "do not ask again" preference into the user profile
        user_profile.set_check_for_updates_on_open(
            self.design_window,
            self.do_not_ask_again_checkbox.isChecked(),
        )
+
+
def open_update_checker(autosplit: "AutoSplit", latest_version: str, check_on_open: bool):
    """Show the update checker window unless one is already visible."""
    existing = autosplit.UpdateCheckerWidget
    if not existing or cast(QtWidgets.QWidget, existing).isHidden():
        autosplit.UpdateCheckerWidget = __UpdateCheckerWidget(latest_version, autosplit, check_on_open)
+
+
def view_help():
    """Open the AutoSplit tutorial section of the README in the default browser."""
    webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#tutorial")
+
+
class __CheckForUpdatesThread(QtCore.QThread):  # noqa: N801 # Private class
    """Background thread that queries GitHub for the latest release version."""

    def __init__(self, autosplit: "AutoSplit", check_on_open: bool):
        super().__init__()
        self.autosplit = autosplit
        self.check_on_open = check_on_open

    @override
    def run(self):
        try:
            response = requests.get(f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", timeout=30)
            # Release names are expected to contain a "v" prefix before the version number
            latest_version = str(response.json()["name"]).split("v")[1]
            self.autosplit.update_checker_widget_signal.emit(latest_version, self.check_on_open)
        except (RequestException, KeyError, IndexError):
            # IndexError: a release name without a "v" would otherwise crash the thread uncaught
            if not self.check_on_open:
                self.autosplit.show_error_signal.emit(error_messages.check_for_updates)
+
+
def about_qt():
    """Open the "About Qt" wiki page in the default browser."""
    webbrowser.open("https://wiki.qt.io/About_Qt")
+
+
def about_qt_for_python():
    """Open the "Qt for Python" wiki page in the default browser."""
    webbrowser.open("https://wiki.qt.io/Qt_for_Python")
+
+
def check_for_updates(autosplit: "AutoSplit", check_on_open: bool = False):
    """Start a background thread that checks GitHub for a newer release.

    The thread is stored on `autosplit` before being started.
    """
    autosplit.CheckForUpdatesThread = __CheckForUpdatesThread(autosplit, check_on_open)
    autosplit.CheckForUpdatesThread.start()
+
+
+class __SettingsWidget(QtWidgets.QWidget, settings_ui.Ui_SettingsWidget): # noqa: N801 # Private class
    def __init__(self, autosplit: "AutoSplit"):
        super().__init__()
        self.__video_capture_devices: list[CameraInfo] = []
        """
        Used to temporarily store the existing cameras,
        we don't want to call `get_all_video_capture_devices` again and possibly have a different result
        """

        self.setupUi(self)

        # Fix Fusion Dark Theme's tabs content looking weird because it's using the button role
        window_color = self.palette().color(QPalette.ColorRole.Window)
        if window_color.red() < HALF_BRIGHTNESS:
            brush = QBrush(window_color)
            brush.setStyle(Qt.BrushStyle.SolidPattern)
            palette = QPalette()
            palette.setBrush(QPalette.ColorGroup.Active, QPalette.ColorRole.Button, brush)
            palette.setBrush(QPalette.ColorGroup.Inactive, QPalette.ColorRole.Button, brush)
            palette.setBrush(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Button, brush)
            self.settings_tabs.setPalette(palette)

        self.autosplit = autosplit
        self.__set_readme_link()
        # Don't autofocus any particular field
        self.setFocus()

# region Build the Capture method combobox
        capture_method_values = CAPTURE_METHODS.values()
        # Fire-and-forget: populates the device combobox asynchronously
        self.__set_all_capture_devices()

        # TODO: Word-wrapping works, but there's lots of extra padding to the right. Raise issue upstream
        # list_view = QtWidgets.QListView()
        # list_view.setWordWrap(True)
        # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
        # list_view.setFixedWidth(self.capture_method_combobox.width())
        # self.capture_method_combobox.setView(list_view)

        self.capture_method_combobox.addItems([
            f"- {method.name} ({method.short_description})"
            for method in capture_method_values
        ])
        self.capture_method_combobox.setToolTip(
            "\n\n".join([
                f"{method.name} :\n{method.description}"
                for method in capture_method_values
            ]),
        )
# endregion

        self.__setup_bindings()

        self.show()
+
+ def __update_default_threshold(self, value: Any):
+ self.__set_value("default_similarity_threshold", value)
+ self.autosplit.table_current_image_threshold_label.setText(
+ decimal(self.autosplit.split_image.get_similarity_threshold(self.autosplit))
+ if self.autosplit.split_image
+ else "-",
+ )
+ self.autosplit.table_reset_image_threshold_label.setText(
+ decimal(self.autosplit.reset_image.get_similarity_threshold(self.autosplit))
+ if self.autosplit.reset_image
+ else "-",
+ )
+
    def __set_value(self, key: str, value: Any):
        # Generic setter used by the widget signal bindings below
        self.autosplit.settings_dict[key] = value
+
+ def get_capture_device_index(self, capture_device_id: int):
+ """Returns 0 if the capture_device_id is invalid."""
+ try:
+ return [device.device_id for device in self.__video_capture_devices].index(capture_device_id)
+ except ValueError:
+ return 0
+
    def __enable_capture_device_if_its_selected_method(
        self,
        selected_capture_method: str | CaptureMethodEnum | None = None,
    ):
        """Enable the device combobox only when "Video Capture Device" is the active method."""
        if selected_capture_method is None:
            # Default to the currently saved capture method
            selected_capture_method = self.autosplit.settings_dict["capture_method"]
        is_video_capture_device = selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE
        self.capture_device_combobox.setEnabled(is_video_capture_device)
        if is_video_capture_device:
            self.capture_device_combobox.setCurrentIndex(
                self.get_capture_device_index(self.autosplit.settings_dict["capture_device_id"]),
            )
        else:
            # Clear the selection and hint at how to enable it
            self.capture_device_combobox.setPlaceholderText('Select "Video Capture Device" above')
            self.capture_device_combobox.setCurrentIndex(-1)
+
+    def __capture_method_changed(self):
+        """Apply the capture method chosen in the combobox and return it so
+        the connected lambda can store it in the settings dict."""
+        selected_capture_method = CAPTURE_METHODS.get_method_by_index(self.capture_method_combobox.currentIndex())
+        self.__enable_capture_device_if_its_selected_method(selected_capture_method)
+        change_capture_method(selected_capture_method, self.autosplit)
+        return selected_capture_method
+
+    def __capture_device_changed(self):
+        """Persist the newly selected capture device and, if the capture
+        method is already "Video Capture Device", reinitialize it."""
+        device_index = self.capture_device_combobox.currentIndex()
+        # -1 means nothing is selected (e.g. the combobox was cleared/disabled)
+        if device_index == -1:
+            return
+        capture_device = self.__video_capture_devices[device_index]
+        self.autosplit.settings_dict["capture_device_name"] = capture_device.name
+        self.autosplit.settings_dict["capture_device_id"] = capture_device.device_id
+        if self.autosplit.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
+            # Re-initializes the VideoCaptureDeviceCaptureMethod
+            change_capture_method(CaptureMethodEnum.VIDEO_CAPTURE_DEVICE, self.autosplit)
+
+ def __fps_limit_changed(self, value: int):
+ value = self.fps_limit_spinbox.value()
+ self.autosplit.settings_dict["fps_limit"] = value
+ self.autosplit.timer_live_image.setInterval(int(1000 / value))
+ self.autosplit.timer_live_image.setInterval(int(1000 / value))
+
+ @fire_and_forget
+ def __set_all_capture_devices(self):
+ self.__video_capture_devices = asyncio.run(get_all_video_capture_devices())
+ if len(self.__video_capture_devices) > 0:
+ for i in range(self.capture_device_combobox.count()):
+ self.capture_device_combobox.removeItem(i)
+ self.capture_device_combobox.addItems([
+ f"* {device.name}"
+ + (f" [{device.backend}]" if device.backend else "")
+ + (" (occupied)" if device.occupied else "")
+ for device in self.__video_capture_devices
+ ])
+ self.__enable_capture_device_if_its_selected_method()
+ else:
+ self.capture_device_combobox.setPlaceholderText("No device found.")
+
+    def __set_readme_link(self):
+        """Fill the GITHUB_REPOSITORY placeholder in the info label and wire
+        an invisible button to open the project README in a browser."""
+        self.custom_image_settings_info_label.setText(
+            self.custom_image_settings_info_label
+            .text()
+            .format(GITHUB_REPOSITORY=GITHUB_REPOSITORY),
+        )
+        # HACK: This is a workaround because custom_image_settings_info_label
+        # simply will not open links with a left click no matter what we tried.
+        self.readme_link_button.clicked.connect(
+            lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme"),
+        )
+        # Make the button itself invisible so only the label text shows
+        self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);")
+
+ def __select_screenshot_directory(self):
+ self.autosplit.settings_dict["screenshot_directory"] = QFileDialog.getExistingDirectory(
+ self,
+ "Select Screenshots Directory",
+ self.autosplit.settings_dict["screenshot_directory"]
+ or self.autosplit.settings_dict["split_image_directory"],
+ )
+ self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"])
+
+    def __setup_bindings(self):
+        """Set every widget's initial value from the settings dict, then
+        connect widget signals so user edits are written back to it."""
+        # Hotkey initial values and bindings
+        def hotkey_connect(hotkey: Hotkey):
+            # Binds `hotkey` by value; a bare lambda in the loop below would
+            # late-bind and always refer to the last hotkey.
+            return lambda: set_hotkey(self.autosplit, hotkey)
+
+        for hotkey in HOTKEYS:
+            hotkey_input: QtWidgets.QLineEdit = getattr(self, f"{hotkey}_input")
+            set_hotkey_hotkey_button: QtWidgets.QPushButton = getattr(self, f"set_{hotkey}_hotkey_button")
+            hotkey_input.setText(self.autosplit.settings_dict.get(f"{hotkey}_hotkey", ""))
+
+            set_hotkey_hotkey_button.clicked.connect(hotkey_connect(hotkey))
+            # Make it very clear that hotkeys are not used when auto-controlled
+            if self.autosplit.is_auto_controlled and hotkey != "toggle_auto_reset_image":
+                set_hotkey_hotkey_button.setEnabled(False)
+                hotkey_input.setEnabled(False)
+
+# region Set initial values
+        # Capture Settings
+        self.fps_limit_spinbox.setValue(self.autosplit.settings_dict["fps_limit"])
+        self.live_capture_region_checkbox.setChecked(self.autosplit.settings_dict["live_capture_region"])
+        self.capture_method_combobox.setCurrentIndex(
+            CAPTURE_METHODS.get_index(self.autosplit.settings_dict["capture_method"]),
+        )
+        # No self.capture_device_combobox.setCurrentIndex
+        # It'll set itself asynchronously in self.__set_all_capture_devices()
+        self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"])
+        self.open_screenshot_checkbox.setChecked(self.autosplit.settings_dict["open_screenshot"])
+
+        # Image Settings
+        self.default_comparison_method_combobox.setCurrentIndex(
+            self.autosplit.settings_dict["default_comparison_method"],
+        )
+        self.default_similarity_threshold_spinbox.setValue(self.autosplit.settings_dict["default_similarity_threshold"])
+        self.default_delay_time_spinbox.setValue(self.autosplit.settings_dict["default_delay_time"])
+        self.default_pause_time_spinbox.setValue(self.autosplit.settings_dict["default_pause_time"])
+        self.loop_splits_checkbox.setChecked(self.autosplit.settings_dict["loop_splits"])
+        self.start_also_resets_checkbox.setChecked(self.autosplit.settings_dict["start_also_resets"])
+        self.enable_auto_reset_image_checkbox.setChecked(self.autosplit.settings_dict["enable_auto_reset"])
+# endregion
+# region Binding
+        # Capture Settings
+        self.fps_limit_spinbox.valueChanged.connect(self.__fps_limit_changed)
+        self.live_capture_region_checkbox.stateChanged.connect(
+            lambda: self.__set_value("live_capture_region", self.live_capture_region_checkbox.isChecked()),
+        )
+        self.capture_method_combobox.currentIndexChanged.connect(
+            lambda: self.__set_value("capture_method", self.__capture_method_changed()),
+        )
+        self.capture_device_combobox.currentIndexChanged.connect(self.__capture_device_changed)
+        self.screenshot_directory_browse_button.clicked.connect(self.__select_screenshot_directory)
+        self.open_screenshot_checkbox.stateChanged.connect(
+            lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()),
+        )
+
+        # Image Settings
+        self.default_comparison_method_combobox.currentIndexChanged.connect(
+            lambda: self.__set_value(
+                "default_comparison_method",
+                self.default_comparison_method_combobox.currentIndex(),
+            ),
+        )
+        self.default_similarity_threshold_spinbox.valueChanged.connect(
+            lambda: self.__update_default_threshold(self.default_similarity_threshold_spinbox.value()),
+        )
+        self.default_delay_time_spinbox.valueChanged.connect(
+            lambda: self.__set_value("default_delay_time", self.default_delay_time_spinbox.value()),
+        )
+        self.default_pause_time_spinbox.valueChanged.connect(
+            lambda: self.__set_value("default_pause_time", self.default_pause_time_spinbox.value()),
+        )
+        self.loop_splits_checkbox.stateChanged.connect(
+            lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()),
+        )
+        self.start_also_resets_checkbox.stateChanged.connect(
+            lambda: self.__set_value("start_also_resets", self.start_also_resets_checkbox.isChecked()),
+        )
+        self.enable_auto_reset_image_checkbox.stateChanged.connect(
+            lambda: self.__set_value("enable_auto_reset", self.enable_auto_reset_image_checkbox.isChecked()),
+        )
+# endregion
+
+
+def open_settings(autosplit: "AutoSplit"):
+    """Show the settings dialog, creating a fresh instance unless one is
+    already open and visible."""
+    if not autosplit.SettingsWidget or cast(QtWidgets.QWidget, autosplit.SettingsWidget).isHidden():
+        autosplit.SettingsWidget = __SettingsWidget(autosplit)
+
+
+def get_default_settings_from_ui(autosplit: "AutoSplit"):
+    """Build a UserProfileDict of default settings by instantiating a fresh
+    (never shown) settings UI and reading each widget's initial value."""
+    temp_dialog = QtWidgets.QWidget()
+    default_settings_dialog = settings_ui.Ui_SettingsWidget()
+    default_settings_dialog.setupUi(temp_dialog)
+    default_settings: user_profile.UserProfileDict = {
+        "split_hotkey": default_settings_dialog.split_input.text(),
+        "reset_hotkey": default_settings_dialog.reset_input.text(),
+        "undo_split_hotkey": default_settings_dialog.undo_split_input.text(),
+        "skip_split_hotkey": default_settings_dialog.skip_split_input.text(),
+        "pause_hotkey": default_settings_dialog.pause_input.text(),
+        "screenshot_hotkey": default_settings_dialog.screenshot_input.text(),
+        "toggle_auto_reset_image_hotkey": default_settings_dialog.toggle_auto_reset_image_input.text(),
+        "fps_limit": default_settings_dialog.fps_limit_spinbox.value(),
+        "live_capture_region": default_settings_dialog.live_capture_region_checkbox.isChecked(),
+        "capture_method": CAPTURE_METHODS.get_method_by_index(
+            default_settings_dialog.capture_method_combobox.currentIndex(),
+        ),
+        "capture_device_id": default_settings_dialog.capture_device_combobox.currentIndex(),
+        "capture_device_name": "",
+        "default_comparison_method": default_settings_dialog.default_comparison_method_combobox.currentIndex(),
+        "default_similarity_threshold": default_settings_dialog.default_similarity_threshold_spinbox.value(),
+        "default_delay_time": default_settings_dialog.default_delay_time_spinbox.value(),
+        "default_pause_time": default_settings_dialog.default_pause_time_spinbox.value(),
+        "loop_splits": default_settings_dialog.loop_splits_checkbox.isChecked(),
+        "start_also_resets": default_settings_dialog.start_also_resets_checkbox.isChecked(),
+        "enable_auto_reset": default_settings_dialog.enable_auto_reset_image_checkbox.isChecked(),
+        # These two don't come from the temp dialog: the split image directory
+        # and the capture region live on the main window.
+        "split_image_directory": autosplit.split_image_folder_input.text(),
+        "screenshot_directory": default_settings_dialog.screenshot_directory_input.text(),
+        "open_screenshot": default_settings_dialog.open_screenshot_checkbox.isChecked(),
+        "captured_window_title": "",
+        "capture_region": {
+            "x": autosplit.x_spinbox.value(),
+            "y": autosplit.y_spinbox.value(),
+            "width": autosplit.width_spinbox.value(),
+            "height": autosplit.height_spinbox.value(),
+        },
+    }
+    # The widget was never shown; discard it immediately
+    del temp_dialog
+    return default_settings
diff --git a/src/region_selection.py b/src/region_selection.py
index 76ecf277..137e487a 100644
--- a/src/region_selection.py
+++ b/src/region_selection.py
@@ -1,397 +1,397 @@
-from __future__ import annotations
-
-import ctypes
-import ctypes.wintypes
-import os
-from math import ceil
-from typing import TYPE_CHECKING
-
-import cv2
-import numpy as np
-from cv2.typing import MatLike
-from PySide6 import QtCore, QtGui, QtWidgets
-from PySide6.QtTest import QTest
-from pywinctl import getTopWindowAt
-from typing_extensions import override
-from win32 import win32gui
-from win32con import SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN, SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN
-from winsdk._winrt import initialize_with_window
-from winsdk.windows.foundation import AsyncStatus, IAsyncOperation
-from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker
-
-import error_messages
-from utils import (
- BGR_CHANNEL_COUNT,
- MAXBYTE,
- ImageShape,
- auto_split_directory,
- get_window_bounds,
- is_valid_hwnd,
- is_valid_image,
-)
-
-user32 = ctypes.windll.user32
-
-
-if TYPE_CHECKING:
-
- from AutoSplit import AutoSplit
-
-ALIGN_REGION_THRESHOLD = 0.9
-BORDER_WIDTH = 2
-SUPPORTED_IMREAD_FORMATS = [
- ("Windows bitmaps", "*.bmp *.dib"),
- ("JPEG files", "*.jpeg *.jpg *.jpe"),
- ("JPEG 2000 files", "*.jp2"),
- ("Portable Network Graphics", "*.png"),
- ("WebP", "*.webp"),
- ("AVIF", "*.avif"),
- ("Portable image format", "*.pbm *.pgm *.ppm *.pxm *.pnm"),
- ("PFM files", "*.pfm"),
- ("Sun rasters", "*.sr *.ras"),
- ("TIFF files", "*.tiff *.tif"),
- ("OpenEXR Image files", "*.exr"),
- ("Radiance HDR", "*.hdr *.pic"),
-]
-"""https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread"""
-IMREAD_EXT_FILTER = "All Files (" \
- + " ".join([f"{extensions}" for _, extensions in SUPPORTED_IMREAD_FORMATS]) \
- + ");;"\
- + ";;".join([f"{imread_format} ({extensions})" for imread_format, extensions in SUPPORTED_IMREAD_FORMATS])
-
-
-def __select_graphics_item(autosplit: AutoSplit): # pyright: ignore [reportUnusedFunction]
- # TODO: For later as a different picker option
- """Uses the built-in GraphicsCapturePicker to select the Window."""
- def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status: AsyncStatus):
- try:
- if async_status != AsyncStatus.COMPLETED:
- return
- except SystemError as exception:
- # HACK: can happen when closing the GraphicsCapturePicker
- if str(exception).endswith("returned a result with an error set"):
- return
- raise
- item = async_operation.get_results()
- if not item:
- return
- autosplit.settings_dict["captured_window_title"] = item.display_name
- autosplit.capture_method.reinitialize(autosplit)
-
- picker = GraphicsCapturePicker()
- initialize_with_window(picker, int(autosplit.effectiveWinId()))
- async_operation = picker.pick_single_item_async()
- # None if the selection is canceled
- if async_operation:
- async_operation.completed = callback
-
-
-def select_region(autosplit: AutoSplit):
- # Create a screen selector widget
- selector = SelectRegionWidget()
-
- # Need to wait until the user has selected a region using the widget
- # before moving on with selecting the window settings
- while True:
- width = selector.width()
- height = selector.height()
- x = selector.x()
- y = selector.y()
- if width > 0 and height > 0:
- break
- QTest.qWait(1)
- del selector
-
- window = getTopWindowAt(x, y)
- if not window:
- error_messages.region()
- return
- hwnd = window.getHandle()
- window_text = window.title
- if not is_valid_hwnd(hwnd) or not window_text:
- error_messages.region()
- return
-
- autosplit.hwnd = hwnd
- autosplit.settings_dict["captured_window_title"] = window_text
- autosplit.capture_method.reinitialize(autosplit)
-
- left_bounds, top_bounds, *_ = get_window_bounds(hwnd)
- window_x, window_y, *_ = win32gui.GetWindowRect(hwnd)
- offset_x = window_x + left_bounds
- offset_y = window_y + top_bounds
- __set_region_values(
- autosplit,
- left=x - offset_x,
- top=y - offset_y,
- width=width,
- height=height,
- )
-
-
-def select_window(autosplit: AutoSplit):
- # Create a screen selector widget
- selector = SelectWindowWidget()
-
- # Need to wait until the user has selected a region using the widget before moving on with
- # selecting the window settings
- while True:
- x = selector.x()
- y = selector.y()
- if x and y:
- break
- QTest.qWait(1)
- del selector
-
- window = getTopWindowAt(x, y)
- if not window:
- error_messages.region()
- return
- hwnd = window.getHandle()
- window_text = window.title
- if not is_valid_hwnd(hwnd) or not window_text:
- error_messages.region()
- return
-
- autosplit.hwnd = hwnd
- autosplit.settings_dict["captured_window_title"] = window_text
- autosplit.capture_method.reinitialize(autosplit)
-
- # Exlude the borders and titlebar from the window selection. To only get the client area.
- _, __, window_width, window_height = get_window_bounds(hwnd)
- _, __, client_width, client_height = win32gui.GetClientRect(hwnd)
- border_width = ceil((window_width - client_width) / 2)
- titlebar_with_border_height = window_height - client_height - border_width
-
- __set_region_values(
- autosplit,
- left=border_width,
- top=titlebar_with_border_height,
- width=client_width,
- height=client_height - border_width * 2,
- )
-
-
-def align_region(autosplit: AutoSplit):
- # Check to see if a region has been set
- if not autosplit.capture_method.check_selected_region_exists(autosplit):
- error_messages.region()
- return
- # This is the image used for aligning the capture region to the best fit for the user.
- template_filename = QtWidgets.QFileDialog.getOpenFileName(
- autosplit,
- "Select Reference Image",
- autosplit.settings_dict["split_image_directory"] or auto_split_directory,
- IMREAD_EXT_FILTER,
- )[0]
-
- # Return if the user presses cancel
- if not template_filename:
- return
-
- template = cv2.imread(template_filename, cv2.IMREAD_UNCHANGED)
- # Add alpha channel to template if it's missing.
- if template.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT:
- template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA)
-
- # Validate template is a valid image file
- if not is_valid_image(template):
- error_messages.image_validity()
- return
-
- # Obtaining the capture of a region which contains the
- # subregion being searched for to align the image.
- capture, _ = autosplit.capture_method.get_frame(autosplit)
-
- if not is_valid_image(capture):
- error_messages.region()
- return
-
- best_match, best_height, best_width, best_loc = __test_alignment(capture, template)
-
- # Go ahead and check if this satisfies our requirement before setting the region
- # We don't want a low similarity image to be aligned.
- if best_match < ALIGN_REGION_THRESHOLD:
- error_messages.alignment_not_matched()
- return
-
- # The new region can be defined by using the min_loc point and the best_height and best_width of the template.
- __set_region_values(
- autosplit,
- left=autosplit.settings_dict["capture_region"]["x"] + best_loc[0],
- top=autosplit.settings_dict["capture_region"]["y"] + best_loc[1],
- width=best_width,
- height=best_height,
- )
-
-
-def __set_region_values(autosplit: AutoSplit, left: int, top: int, width: int, height: int):
- autosplit.settings_dict["capture_region"]["x"] = left
- autosplit.settings_dict["capture_region"]["y"] = top
- autosplit.settings_dict["capture_region"]["width"] = width
- autosplit.settings_dict["capture_region"]["height"] = height
-
- autosplit.x_spinbox.setValue(left)
- autosplit.y_spinbox.setValue(top)
- autosplit.width_spinbox.setValue(width)
- autosplit.height_spinbox.setValue(height)
-
-
-def __test_alignment(capture: MatLike, template: MatLike):
- """
- Obtain the best matching point for the template within the
- capture. This assumes that the template is actually smaller
- than the dimensions of the capture. Since we are using SQDIFF
- the best match will be the min_val which is located at min_loc.
- The best match found in the image, set everything to 0 by default
- so that way the first match will overwrite these values.
- """
- best_match = 0.0
- best_height = 0
- best_width = 0
- best_loc = (0, 0)
-
- # This tests 50 images scaled from 20% to 300% of the original template size
- for scale in np.linspace(0.2, 3, num=56):
- width = int(template.shape[ImageShape.X] * scale)
- height = int(template.shape[ImageShape.Y] * scale)
-
- # The template can not be larger than the capture
- if width > capture.shape[ImageShape.X] or height > capture.shape[ImageShape.Y]:
- continue
-
- resized = cv2.resize(template, (width, height), interpolation=cv2.INTER_NEAREST)
-
- result = cv2.matchTemplate(capture, resized, cv2.TM_SQDIFF)
- min_val, _, min_loc, *_ = cv2.minMaxLoc(result)
-
- # The maximum value for SQ_DIFF is dependent on the size of the template
- # we need this value to normalize it from 0.0 to 1.0
- max_error = resized.size * MAXBYTE * MAXBYTE
- similarity = 1 - (min_val / max_error)
-
- # Check if the similarity was good enough to get alignment
- if similarity > best_match:
- best_match = similarity
- best_width = width
- best_height = height
- best_loc = min_loc
- return best_match, best_height, best_width, best_loc
-
-
-def validate_before_parsing(autosplit: AutoSplit, show_error: bool = True, check_empty_directory: bool = True):
- error = None
- if not autosplit.settings_dict["split_image_directory"]:
- error = error_messages.split_image_directory
- elif not os.path.isdir(autosplit.settings_dict["split_image_directory"]):
- error = error_messages.split_image_directory_not_found
- elif check_empty_directory and not os.listdir(autosplit.settings_dict["split_image_directory"]):
- error = error_messages.split_image_directory_empty
- elif not autosplit.capture_method.check_selected_region_exists(autosplit):
- error = error_messages.region
- if error and show_error:
- error()
- return not error
-
-
-class BaseSelectWidget(QtWidgets.QWidget):
- _x = 0
- _y = 0
-
- @override
- def x(self):
- return self._x
-
- @override
- def y(self):
- return self._y
-
- def __init__(self):
- super().__init__()
- # We need to pull the monitor information to correctly draw the geometry covering all portions
- # of the user's screen. These parameters create the bounding box with left, top, width, and height
- self.setGeometry(
- user32.GetSystemMetrics(SM_XVIRTUALSCREEN),
- user32.GetSystemMetrics(SM_YVIRTUALSCREEN),
- user32.GetSystemMetrics(SM_CXVIRTUALSCREEN),
- user32.GetSystemMetrics(SM_CYVIRTUALSCREEN),
- )
- self.setWindowTitle(" ")
- self.setWindowOpacity(0.5)
- self.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint)
- self.show()
-
- @override
- def keyPressEvent(self, event: QtGui.QKeyEvent):
- if event.key() == QtCore.Qt.Key.Key_Escape:
- self.close()
-
-
-class SelectWindowWidget(BaseSelectWidget):
- """Widget to select a window and obtain its bounds."""
-
- @override
- def mouseReleaseEvent(self, event: QtGui.QMouseEvent):
- self._x = int(event.position().x()) + self.geometry().x()
- self._y = int(event.position().y()) + self.geometry().y()
- self.close()
-
-
-class SelectRegionWidget(BaseSelectWidget):
- """
- Widget for dragging screen region
- Originated from https://github.com/harupy/snipping-tool .
- """
-
- _right: int = 0
- _bottom: int = 0
- __begin = QtCore.QPoint()
- __end = QtCore.QPoint()
-
- def __init__(self):
- QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))
- super().__init__()
-
- @override
- def height(self):
- return self._bottom - self._y
-
- @override
- def width(self):
- return self._right - self._x
-
- @override
- def paintEvent(self, event: QtGui.QPaintEvent):
- if self.__begin != self.__end:
- qpainter = QtGui.QPainter(self)
- qpainter.setPen(QtGui.QPen(QtGui.QColor("red"), BORDER_WIDTH))
- qpainter.setBrush(QtGui.QColor("opaque"))
- qpainter.drawRect(QtCore.QRect(self.__begin, self.__end))
-
- @override
- def mousePressEvent(self, event: QtGui.QMouseEvent):
- self.__begin = event.position().toPoint()
- self.__end = self.__begin
- self.update()
-
- @override
- def mouseMoveEvent(self, event: QtGui.QMouseEvent):
- self.__end = event.position().toPoint()
- self.update()
-
- @override
- def mouseReleaseEvent(self, event: QtGui.QMouseEvent):
- if self.__begin != self.__end:
- # The coordinates are pulled relative to the top left of the set geometry,
- # so the added virtual screen offsets convert them back to the virtual screen coordinates
- self._x = min(self.__begin.x(), self.__end.x()) + self.geometry().x()
- self._y = min(self.__begin.y(), self.__end.y()) + self.geometry().y()
- self._right = max(self.__begin.x(), self.__end.x()) + self.geometry().x()
- self._bottom = max(self.__begin.y(), self.__end.y()) + self.geometry().y()
-
- self.close()
-
- @override
- def close(self):
- QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
- return super().close()
+import ctypes
+import ctypes.wintypes
+import os
+from math import ceil
+from typing import TYPE_CHECKING
+
+import cv2
+import numpy as np
+from cv2.typing import MatLike
+from PySide6 import QtCore, QtGui, QtWidgets
+from PySide6.QtTest import QTest
+from pywinctl import getTopWindowAt
+from typing_extensions import override
+from win32 import win32gui
+from win32con import SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN, SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN
+from winsdk._winrt import initialize_with_window
+from winsdk.windows.foundation import AsyncStatus, IAsyncOperation
+from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker
+
+import error_messages
+from utils import (
+ BGR_CHANNEL_COUNT,
+ MAXBYTE,
+ ImageShape,
+ auto_split_directory,
+ get_window_bounds,
+ is_valid_hwnd,
+ is_valid_image,
+)
+
+user32 = ctypes.windll.user32
+
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+# Minimum normalized similarity for align_region to accept a template match
+ALIGN_REGION_THRESHOLD = 0.9
+# Pen width (px) of the rectangle drawn while dragging a region selection
+BORDER_WIDTH = 2
+SUPPORTED_IMREAD_FORMATS = [
+    ("Windows bitmaps", "*.bmp *.dib"),
+    ("JPEG files", "*.jpeg *.jpg *.jpe"),
+    ("JPEG 2000 files", "*.jp2"),
+    ("Portable Network Graphics", "*.png"),
+    ("WebP", "*.webp"),
+    ("AVIF", "*.avif"),
+    ("Portable image format", "*.pbm *.pgm *.ppm *.pxm *.pnm"),
+    ("PFM files", "*.pfm"),
+    ("Sun rasters", "*.sr *.ras"),
+    ("TIFF files", "*.tiff *.tif"),
+    ("OpenEXR Image files", "*.exr"),
+    ("Radiance HDR", "*.hdr *.pic"),
+]
+"""https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread"""
+# Qt file-dialog filter string: one "All Files" entry listing every extension,
+# followed by one entry per individual format
+IMREAD_EXT_FILTER = (
+    "All Files ("
+    + " ".join([f"{extensions}" for _, extensions in SUPPORTED_IMREAD_FORMATS])
+    + ");;"
+    + ";;".join([f"{imread_format} ({extensions})" for imread_format, extensions in SUPPORTED_IMREAD_FORMATS])
+)
+
+
+# TODO: For later as a different picker option
+def __select_graphics_item(autosplit: "AutoSplit"):  # pyright: ignore [reportUnusedFunction]
+    """Uses the built-in GraphicsCapturePicker to select the Window."""
+
+    def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status: AsyncStatus):
+        # Invoked when the picker finishes (confirmed or cancelled)
+        try:
+            if async_status != AsyncStatus.COMPLETED:
+                return
+        except SystemError as exception:
+            # HACK: can happen when closing the GraphicsCapturePicker
+            if str(exception).endswith("returned a result with an error set"):
+                return
+            raise
+        item = async_operation.get_results()
+        if not item:
+            return
+        autosplit.settings_dict["captured_window_title"] = item.display_name
+        autosplit.capture_method.reinitialize(autosplit)
+
+    picker = GraphicsCapturePicker()
+    # The WinRT picker must be attached to a real window handle to be shown
+    initialize_with_window(picker, int(autosplit.effectiveWinId()))
+    async_operation = picker.pick_single_item_async()
+    # None if the selection is canceled
+    if async_operation:
+        async_operation.completed = callback
+
+
+def select_region(autosplit: "AutoSplit"):
+    """Let the user drag a rectangle on screen, then store that rectangle as
+    the capture region, relative to the window found under it."""
+    # Create a screen selector widget
+    selector = SelectRegionWidget()
+
+    # Need to wait until the user has selected a region using the widget
+    # before moving on with selecting the window settings
+    while True:
+        width = selector.width()
+        height = selector.height()
+        x = selector.x()
+        y = selector.y()
+        if width > 0 and height > 0:
+            break
+        QTest.qWait(1)
+    del selector
+
+    window = getTopWindowAt(x, y)
+    if not window:
+        error_messages.region()
+        return
+    hwnd = window.getHandle()
+    window_text = window.title
+    if not is_valid_hwnd(hwnd) or not window_text:
+        error_messages.region()
+        return
+
+    autosplit.hwnd = hwnd
+    autosplit.settings_dict["captured_window_title"] = window_text
+    autosplit.capture_method.reinitialize(autosplit)
+
+    # Convert the absolute screen selection into coordinates relative to the
+    # captured window's client area
+    left_bounds, top_bounds, *_ = get_window_bounds(hwnd)
+    window_x, window_y, *_ = win32gui.GetWindowRect(hwnd)
+    offset_x = window_x + left_bounds
+    offset_y = window_y + top_bounds
+    __set_region_values(
+        autosplit,
+        left=x - offset_x,
+        top=y - offset_y,
+        width=width,
+        height=height,
+    )
+
+
+def select_window(autosplit: "AutoSplit"):
+    """Let the user click a window, then set the capture region to that
+    window's entire client area (borders and titlebar excluded)."""
+    # Create a screen selector widget
+    selector = SelectWindowWidget()
+
+    # Need to wait until the user has selected a region using the widget before moving on with
+    # selecting the window settings
+    while True:
+        x = selector.x()
+        y = selector.y()
+        if x and y:
+            break
+        QTest.qWait(1)
+    del selector
+
+    window = getTopWindowAt(x, y)
+    if not window:
+        error_messages.region()
+        return
+    hwnd = window.getHandle()
+    window_text = window.title
+    if not is_valid_hwnd(hwnd) or not window_text:
+        error_messages.region()
+        return
+
+    autosplit.hwnd = hwnd
+    autosplit.settings_dict["captured_window_title"] = window_text
+    autosplit.capture_method.reinitialize(autosplit)
+
+    # Exclude the borders and titlebar from the window selection. To only get the client area.
+    _, __, window_width, window_height = get_window_bounds(hwnd)
+    _, __, client_width, client_height = win32gui.GetClientRect(hwnd)
+    border_width = ceil((window_width - client_width) / 2)
+    titlebar_with_border_height = window_height - client_height - border_width
+
+    __set_region_values(
+        autosplit,
+        left=border_width,
+        top=titlebar_with_border_height,
+        width=client_width,
+        height=client_height - border_width * 2,
+    )
+
+
+def align_region(autosplit: "AutoSplit"):
+ # Check to see if a region has been set
+ if not autosplit.capture_method.check_selected_region_exists(autosplit):
+ error_messages.region()
+ return
+ # This is the image used for aligning the capture region to the best fit for the user.
+ template_filename = QtWidgets.QFileDialog.getOpenFileName(
+ autosplit,
+ "Select Reference Image",
+ autosplit.settings_dict["split_image_directory"] or auto_split_directory,
+ IMREAD_EXT_FILTER,
+ )[0]
+
+ # Return if the user presses cancel
+ if not template_filename:
+ return
+
+ template = cv2.imread(template_filename, cv2.IMREAD_UNCHANGED)
+ # Add alpha channel to template if it's missing.
+ if template.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT:
+ template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA)
+
+ # Validate template is a valid image file
+ if not is_valid_image(template):
+ error_messages.image_validity()
+ return
+
+ # Obtaining the capture of a region which contains the
+ # subregion being searched for to align the image.
+ capture, _ = autosplit.capture_method.get_frame(autosplit)
+
+ if not is_valid_image(capture):
+ error_messages.region()
+ return
+
+ best_match, best_height, best_width, best_loc = __test_alignment(capture, template)
+
+ # Go ahead and check if this satisfies our requirement before setting the region
+ # We don't want a low similarity image to be aligned.
+ if best_match < ALIGN_REGION_THRESHOLD:
+ error_messages.alignment_not_matched()
+ return
+
+ # The new region can be defined by using the min_loc point and the best_height and best_width of the template.
+ __set_region_values(
+ autosplit,
+ left=autosplit.settings_dict["capture_region"]["x"] + best_loc[0],
+ top=autosplit.settings_dict["capture_region"]["y"] + best_loc[1],
+ width=best_width,
+ height=best_height,
+ )
+
+
+def __set_region_values(autosplit: "AutoSplit", left: int, top: int, width: int, height: int):
+ autosplit.settings_dict["capture_region"]["x"] = left
+ autosplit.settings_dict["capture_region"]["y"] = top
+ autosplit.settings_dict["capture_region"]["width"] = width
+ autosplit.settings_dict["capture_region"]["height"] = height
+
+ autosplit.x_spinbox.setValue(left)
+ autosplit.y_spinbox.setValue(top)
+ autosplit.width_spinbox.setValue(width)
+ autosplit.height_spinbox.setValue(height)
+
+
+def __test_alignment(capture: MatLike, template: MatLike):
+    """
+    Obtain the best matching point for the template within the
+    capture. This assumes that the template is actually smaller
+    than the dimensions of the capture. Since we are using SQDIFF
+    the best match will be the min_val which is located at min_loc.
+    The best match found in the image, set everything to 0 by default
+    so that way the first match will overwrite these values.
+
+    @return: (best_match, best_height, best_width, best_loc) — normalized
+    similarity in [0, 1], the matched template size, and its top-left point.
+    """
+    best_match = 0.0
+    best_height = 0
+    best_width = 0
+    best_loc = (0, 0)
+
+    # This tests 56 images scaled from 20% to 300% of the original template size
+    for scale in np.linspace(0.2, 3, num=56):
+        width = int(template.shape[ImageShape.X] * scale)
+        height = int(template.shape[ImageShape.Y] * scale)
+
+        # The template can not be larger than the capture
+        if width > capture.shape[ImageShape.X] or height > capture.shape[ImageShape.Y]:
+            continue
+
+        resized = cv2.resize(template, (width, height), interpolation=cv2.INTER_NEAREST)
+
+        result = cv2.matchTemplate(capture, resized, cv2.TM_SQDIFF)
+        min_val, _, min_loc, *_ = cv2.minMaxLoc(result)
+
+        # The maximum value for SQ_DIFF is dependent on the size of the template
+        # we need this value to normalize it from 0.0 to 1.0
+        max_error = resized.size * MAXBYTE * MAXBYTE
+        similarity = 1 - (min_val / max_error)
+
+        # Check if the similarity was good enough to get alignment
+        if similarity > best_match:
+            best_match = similarity
+            best_width = width
+            best_height = height
+            best_loc = min_loc
+    return best_match, best_height, best_width, best_loc
+
+
+def validate_before_parsing(autosplit: "AutoSplit", show_error: bool = True, check_empty_directory: bool = True):
+ error = None
+ if not autosplit.settings_dict["split_image_directory"]:
+ error = error_messages.split_image_directory
+ elif not os.path.isdir(autosplit.settings_dict["split_image_directory"]):
+ error = error_messages.split_image_directory_not_found
+ elif check_empty_directory and not os.listdir(autosplit.settings_dict["split_image_directory"]):
+ error = error_messages.split_image_directory_empty
+ elif not autosplit.capture_method.check_selected_region_exists(autosplit):
+ error = error_messages.region
+ if error and show_error:
+ error()
+ return not error
+
+
+class BaseSelectWidget(QtWidgets.QWidget):
+    """Frameless, half-transparent overlay spanning the entire virtual screen.
+
+    Base class for the window/region selection widgets; Escape closes it.
+    """
+
+    # Selected coordinates, in virtual-screen space
+    _x = 0
+    _y = 0
+
+    @override
+    def x(self):
+        return self._x
+
+    @override
+    def y(self):
+        return self._y
+
+    def __init__(self):
+        super().__init__()
+        # We need to pull the monitor information to correctly draw the geometry covering all portions
+        # of the user's screen. These parameters create the bounding box with left, top, width, and height
+        self.setGeometry(
+            user32.GetSystemMetrics(SM_XVIRTUALSCREEN),
+            user32.GetSystemMetrics(SM_YVIRTUALSCREEN),
+            user32.GetSystemMetrics(SM_CXVIRTUALSCREEN),
+            user32.GetSystemMetrics(SM_CYVIRTUALSCREEN),
+        )
+        self.setWindowTitle(" ")
+        self.setWindowOpacity(0.5)
+        self.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint)
+        self.show()
+
+    @override
+    def keyPressEvent(self, event: QtGui.QKeyEvent):
+        # Let the user cancel the selection with Escape
+        if event.key() == QtCore.Qt.Key.Key_Escape:
+            self.close()
+
+
+class SelectWindowWidget(BaseSelectWidget):
+    """Widget to select a window and obtain its bounds."""
+
+    @override
+    def mouseReleaseEvent(self, event: QtGui.QMouseEvent):
+        # Add the widget's geometry offset to convert the click position from
+        # widget-local to virtual-screen coordinates
+        self._x = int(event.position().x()) + self.geometry().x()
+        self._y = int(event.position().y()) + self.geometry().y()
+        self.close()
+
+
+class SelectRegionWidget(BaseSelectWidget):
+    """
+    Widget for dragging screen region
+    Originated from https://github.com/harupy/snipping-tool .
+    """
+
+    # Bottom-right corner of the selection, in virtual-screen space
+    _right: int = 0
+    _bottom: int = 0
+    # Current drag endpoints, in widget-local coordinates
+    __begin = QtCore.QPoint()
+    __end = QtCore.QPoint()
+
+    def __init__(self):
+        # Show a crosshair cursor for the whole drag interaction;
+        # restored in close()
+        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))
+        super().__init__()
+
+    @override
+    def height(self):
+        return self._bottom - self._y
+
+    @override
+    def width(self):
+        return self._right - self._x
+
+    @override
+    def paintEvent(self, event: QtGui.QPaintEvent):
+        # Draw the red selection rectangle while a drag is in progress
+        if self.__begin != self.__end:
+            qpainter = QtGui.QPainter(self)
+            qpainter.setPen(QtGui.QPen(QtGui.QColor("red"), BORDER_WIDTH))
+            qpainter.setBrush(QtGui.QColor("opaque"))
+            qpainter.drawRect(QtCore.QRect(self.__begin, self.__end))
+
+    @override
+    def mousePressEvent(self, event: QtGui.QMouseEvent):
+        self.__begin = event.position().toPoint()
+        self.__end = self.__begin
+        self.update()
+
+    @override
+    def mouseMoveEvent(self, event: QtGui.QMouseEvent):
+        self.__end = event.position().toPoint()
+        self.update()
+
+    @override
+    def mouseReleaseEvent(self, event: QtGui.QMouseEvent):
+        if self.__begin != self.__end:
+            # The coordinates are pulled relative to the top left of the set geometry,
+            # so the added virtual screen offsets convert them back to the virtual screen coordinates
+            self._x = min(self.__begin.x(), self.__end.x()) + self.geometry().x()
+            self._y = min(self.__begin.y(), self.__end.y()) + self.geometry().y()
+            self._right = max(self.__begin.x(), self.__end.x()) + self.geometry().x()
+            self._bottom = max(self.__begin.y(), self.__end.y()) + self.geometry().y()
+
+            self.close()
+
+    @override
+    def close(self):
+        # Restore the normal cursor that __init__ overrode
+        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
+        return super().close()
diff --git a/src/split_parser.py b/src/split_parser.py
index 070c9838..6c1281db 100644
--- a/src/split_parser.py
+++ b/src/split_parser.py
@@ -1,249 +1,249 @@
-from __future__ import annotations
-
-import os
-from collections.abc import Callable
-from typing import TYPE_CHECKING, TypeVar
-
-import error_messages
-from AutoSplitImage import RESET_KEYWORD, START_KEYWORD, AutoSplitImage, ImageType
-from utils import is_valid_image
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-[
- DUMMY_FLAG,
- BELOW_FLAG,
- PAUSE_FLAG,
- *_,
-] = [1 << i for i in range(31)] # 32 bits of flags
-
-T = TypeVar("T", str, int, float)
-
-# Note, the following symbols cannot be used in a filename:
-# / \ : * ? " < > |
-
-
-def __value_from_filename(
- filename: str,
- delimiters: str,
- default_value: T,
-) -> T:
- if len(delimiters) != 2: # noqa: PLR2004
- raise ValueError("delimiters parameter must contain exactly 2 characters")
- try:
- string_value = filename.split(delimiters[0], 1)[1].split(delimiters[1])[0]
- value: T = type(default_value)(string_value)
- except (IndexError, ValueError):
- return default_value
- else:
- return value
-
-
-def threshold_from_filename(filename: str):
- """
- Retrieve the threshold from the filename, if there is no threshold or the threshold
- doesn't meet the requirements of being [0, 1], then None is returned.
-
- @param filename: String containing the file's name
- @return: A valid threshold, if not then None
- """
- # Check to make sure there is a valid floating point number between
- # parentheses of the filename
- value = __value_from_filename(filename, "()", -1.0)
-
- # Check to make sure if it is a valid threshold
- return value if 0 <= value <= 1 else None
-
-
-def pause_from_filename(filename: str):
- """
- Retrieve the pause time from the filename, if there is no pause time or the pause time
- isn't a valid positive number or 0, then None is returned.
-
- @param filename: String containing the file's name
- @return: A valid pause time, if not then None
- """
- # Check to make sure there is a valid pause time between brackets
- # of the filename
- value = __value_from_filename(filename, "[]", -1.0)
-
- # Pause times should always be positive or zero
- return value if value >= 0 else None
-
-
-def delay_time_from_filename(filename: str):
- """
- Retrieve the delay time from the filename, if there is no delay time or the delay time
- isn't a valid positive number or 0 number, then None is returned.
-
- @param filename: String containing the file's name
- @return: A valid delay time, if not then none
- """
- # Check to make sure there is a valid delay time between brackets
- # of the filename
- value = __value_from_filename(filename, "##", -1)
-
- # Delay times should always be positive or zero
- return value if value >= 0 else None
-
-
-def loop_from_filename(filename: str):
- """
- Retrieve the number of loops from filename, if there is no loop number or the loop number isn't valid,
- then 1 is returned.
-
- @param filename: String containing the file's name
- @return: A valid loop number, if not then 1
- """
- # Check to make sure there is a valid delay time between brackets
- # of the filename
- value = __value_from_filename(filename, "@@", 1)
-
- # Loop should always be positive
- return value if value >= 1 else 1
-
-
-def comparison_method_from_filename(filename: str):
- """
- Retrieve the comparison method index from filename, if there is no comparison method or the index isn't valid,
- then None is returned.
-
- @param filename: String containing the file's name
- @return: A valid comparison method index, if not then none
- """
- # Check to make sure there is a valid delay time between brackets
- # of the filename
- value = __value_from_filename(filename, "^^", -1)
-
- # Comparison method should always be positive or zero
- return value if value >= 0 else None
-
-
-def flags_from_filename(filename: str):
- """
- Retrieve the flags from the filename, if there are no flags then 0 is returned.
-
- @param filename: String containing the file's name
- @return: The flags as an integer, if invalid flags are found it returns 0
-
- list of flags:
- "d" = dummy, do nothing when this split is found
- "b" = below threshold, after threshold is met, split when it goes below the threhsold.
- "p" = pause, hit pause key when this split is found
- """
- # Check to make sure there are flags between curly braces
- # of the filename
- flags_str = __value_from_filename(filename, "{}", "")
-
- if not flags_str:
- return 0
-
- flags = 0x00
-
- for flag_str in flags_str:
- character = flag_str.upper()
- if character == "D":
- flags |= DUMMY_FLAG
- elif character == "B":
- flags |= BELOW_FLAG
- elif character == "P":
- flags |= PAUSE_FLAG
- # Legacy flags
- elif character == "M":
- continue
- else:
- # An invalid flag was caught, this filename was written incorrectly
- # return 0. We don't want to interpret any misleading filenames
- return 0
-
- # Check for any conflicting flags that were set
- # For instance, we can't have a dummy split also pause
- if (flags & DUMMY_FLAG == DUMMY_FLAG) and (flags & PAUSE_FLAG == PAUSE_FLAG):
- return 0
-
- return flags
-
-
-def __pop_image_type(split_image: list[AutoSplitImage], image_type: ImageType):
- for image in split_image:
- if image.image_type == image_type:
- split_image.remove(image)
- return image
-
- return None
-
-
-def parse_and_validate_images(autosplit: AutoSplit):
- # Get split images
- all_images = [
- AutoSplitImage(os.path.join(autosplit.settings_dict["split_image_directory"], image_name))
- for image_name
- in os.listdir(autosplit.settings_dict["split_image_directory"])
- ]
-
- # Find non-split images and then remove them from the list
- start_image = __pop_image_type(all_images, ImageType.START)
- reset_image = __pop_image_type(all_images, ImageType.RESET)
- split_images = all_images
-
- error_message: Callable[[], object] | None = None
-
- # If there is no start hotkey set but a Start Image is present, and is not auto controlled, throw an error.
- if (
- start_image
- and not autosplit.settings_dict["split_hotkey"]
- and not autosplit.is_auto_controlled
- ):
- error_message = error_messages.load_start_image
-
- # If there is no reset hotkey set but a Reset Image is present, and is not auto controlled, throw an error.
- elif (
- reset_image
- and not autosplit.settings_dict["reset_hotkey"]
- and not autosplit.is_auto_controlled
- ):
- error_message = error_messages.reset_hotkey
-
- # Make sure that each of the images follows the guidelines for correct format
- # according to all of the settings selected by the user.
- else:
- for image in split_images:
- # Test for image without transparency
- if not is_valid_image(image.byte_array):
- def image_validity(filename: str):
- return lambda: error_messages.image_validity(filename)
- error_message = image_validity(image.filename)
- break
-
- # error out if there is a {p} flag but no pause hotkey set and is not auto controlled.
- if (
- not autosplit.settings_dict["pause_hotkey"]
- and image.check_flag(PAUSE_FLAG)
- and not autosplit.is_auto_controlled
- ):
- error_message = error_messages.pause_hotkey
- break
-
- # Check that there's only one Reset Image
- if image.image_type == ImageType.RESET:
- error_message = lambda: error_messages.multiple_keyword_images(RESET_KEYWORD) # noqa: E731
- break
-
- # Check that there's only one Start Image
- if image.image_type == ImageType.START:
- error_message = lambda: error_messages.multiple_keyword_images(START_KEYWORD) # noqa: E731
- break
-
- if error_message:
- autosplit.start_image = None
- autosplit.reset_image = None
- autosplit.split_images = []
- autosplit.gui_changes_on_reset()
- error_message()
- return False
-
- autosplit.start_image = start_image
- autosplit.reset_image = reset_image
- autosplit.split_images = split_images
- return True
+import os
+from collections.abc import Callable
+from typing import TYPE_CHECKING, TypeVar
+
+import error_messages
+from AutoSplitImage import RESET_KEYWORD, START_KEYWORD, AutoSplitImage, ImageType
+from utils import is_valid_image
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
# Bit flags parsed from the "{...}" section of split-image filenames
# (consumed by flags_from_filename below).
[
    DUMMY_FLAG,
    BELOW_FLAG,
    PAUSE_FLAG,
    *_,
] = [1 << i for i in range(31)]  # 32 bits of flags

# Constrained TypeVar: __value_from_filename coerces the parsed text
# to the type of the caller-supplied default value.
T = TypeVar("T", str, int, float)
+
+# Note, the following symbols cannot be used in a filename:
+# / \ : * ? " < > |
+
+
def __value_from_filename(
    filename: str,
    delimiters: str,
    default_value: T,
) -> T:
    """
    Extract the text between the two *delimiters* in *filename* and coerce it
    to the type of *default_value*.

    @param filename: String containing the file's name
    @param delimiters: Exactly two characters: opening then closing delimiter
    @param default_value: Returned unchanged when the delimiters are missing
    or the enclosed text cannot be converted
    @return: The converted value, or *default_value* on failure
    """
    if len(delimiters) != 2:  # noqa: PLR2004
        raise ValueError("delimiters parameter must contain exactly 2 characters")
    open_delimiter, close_delimiter = delimiters
    try:
        enclosed_text = filename.split(open_delimiter, 1)[1].split(close_delimiter)[0]
        parsed: T = type(default_value)(enclosed_text)
    except (IndexError, ValueError):
        # Missing delimiter (IndexError) or unconvertible text (ValueError)
        return default_value
    return parsed
+
+
def threshold_from_filename(filename: str):
    """
    Retrieve the threshold from the filename, if there is no threshold or the threshold
    doesn't meet the requirements of being [0, 1], then None is returned.

    @param filename: String containing the file's name
    @return: A valid threshold, if not then None
    """
    # Thresholds live between parentheses; -1.0 sentinel means "not present".
    threshold = __value_from_filename(filename, "()", -1.0)
    if 0 <= threshold <= 1:
        return threshold
    return None
+
+
def pause_from_filename(filename: str):
    """
    Retrieve the pause time from the filename, if there is no pause time or the pause time
    isn't a valid positive number or 0, then None is returned.

    @param filename: String containing the file's name
    @return: A valid pause time, if not then None
    """
    # Pause times live between square brackets; -1.0 sentinel means "not present".
    pause = __value_from_filename(filename, "[]", -1.0)
    # Pause times must be zero or positive.
    return None if pause < 0 else pause
+
+
def delay_time_from_filename(filename: str):
    """
    Retrieve the delay time from the filename, if there is no delay time or the delay time
    isn't a valid positive number or 0 number, then None is returned.

    @param filename: String containing the file's name
    @return: A valid delay time, if not then none
    """
    # Delay times live between "#" markers; -1 sentinel means "not present".
    delay = __value_from_filename(filename, "##", -1)
    # Delay times must be zero or positive.
    return None if delay < 0 else delay
+
+
def loop_from_filename(filename: str):
    """
    Retrieve the number of loops from filename, if there is no loop number or the loop number isn't valid,
    then 1 is returned.

    @param filename: String containing the file's name
    @return: A valid loop number, if not then 1
    """
    # Loop counts live between "@" markers; default is a single loop.
    loop_count = __value_from_filename(filename, "@@", 1)
    # Loop counts below 1 are clamped to 1.
    return max(loop_count, 1)
+
+
def comparison_method_from_filename(filename: str):
    """
    Retrieve the comparison method index from filename, if there is no comparison method or the index isn't valid,
    then None is returned.

    @param filename: String containing the file's name
    @return: A valid comparison method index, if not then none
    """
    # Comparison method indices live between "^" markers; -1 sentinel means "not present".
    method_index = __value_from_filename(filename, "^^", -1)
    # Indices must be zero or positive.
    return None if method_index < 0 else method_index
+
+
def flags_from_filename(filename: str):
    """
    Retrieve the flags from the filename, if there are no flags then 0 is returned.

    @param filename: String containing the file's name
    @return: The flags as an integer, if invalid flags are found it returns 0

    list of flags:
    "d" = dummy, do nothing when this split is found
    "b" = below threshold, after threshold is met, split when it goes below the threshold.
    "p" = pause, hit pause key when this split is found
    """
    # Flags live between curly braces; missing/empty means no flags at all.
    flag_characters = __value_from_filename(filename, "{}", "")
    if not flag_characters:
        return 0

    # "M" is a legacy flag kept only for backwards compatibility: it maps to
    # no bits at all, so OR-ing it in is a no-op.
    known_flags = {
        "D": DUMMY_FLAG,
        "B": BELOW_FLAG,
        "P": PAUSE_FLAG,
        "M": 0,
    }

    flags = 0x00
    for character in flag_characters:
        flag_bit = known_flags.get(character.upper())
        if flag_bit is None:
            # An invalid flag was caught, this filename was written incorrectly return 0.
            # We don't want to interpret any misleading filenames
            return 0
        flags |= flag_bit

    # Check for any conflicting flags that were set
    # For instance, we can't have a dummy split also pause
    if (flags & DUMMY_FLAG == DUMMY_FLAG) and (flags & PAUSE_FLAG == PAUSE_FLAG):
        return 0

    return flags
+
+
def __pop_image_type(split_image: list[AutoSplitImage], image_type: ImageType):
    """
    Remove and return the first image of *image_type* from *split_image*.

    @param split_image: List of images, mutated in place when a match is found
    @param image_type: The type of image to pop
    @return: The removed image, or None when no image of that type is present
    """
    found = next((image for image in split_image if image.image_type == image_type), None)
    if found is not None:
        split_image.remove(found)
    return found
+
+
def parse_and_validate_images(autosplit: "AutoSplit"):
    """
    Load every image from the configured split-image directory, separate out the
    Start and Reset images, and validate the remaining split images.

    On any validation failure: clears the images on *autosplit*, resets the GUI,
    shows the relevant error and returns False. Otherwise stores the images on
    *autosplit* and returns True.

    @param autosplit: The main application window holding settings and state
    @return: True when all images parsed and validated successfully
    """
    # Get split images
    split_image_directory = autosplit.settings_dict["split_image_directory"]
    all_images = [
        AutoSplitImage(os.path.join(split_image_directory, image_name))
        for image_name
        in os.listdir(split_image_directory)
    ]

    # Find non-split images and then remove them from the list
    start_image = __pop_image_type(all_images, ImageType.START)
    reset_image = __pop_image_type(all_images, ImageType.RESET)
    split_images = all_images

    def check_image(image: AutoSplitImage):
        # Returns an error callback for the first guideline *image* violates, else None.
        # Test for image without transparency
        if not is_valid_image(image.byte_array):
            filename = image.filename
            return lambda: error_messages.image_validity(filename)
        # error out if there is a {p} flag but no pause hotkey set and is not auto controlled.
        if (
            not autosplit.settings_dict["pause_hotkey"]
            and image.check_flag(PAUSE_FLAG)
            and not autosplit.is_auto_controlled
        ):
            return error_messages.pause_hotkey
        # Any remaining Reset/Start image means there was more than one of them.
        if image.image_type == ImageType.RESET:
            return lambda: error_messages.multiple_keyword_images(RESET_KEYWORD)
        if image.image_type == ImageType.START:
            return lambda: error_messages.multiple_keyword_images(START_KEYWORD)
        return None

    error_message: Callable[[], object] | None = None

    # If there is no start hotkey set but a Start Image is present, and is not auto controlled, throw an error.
    if (
        start_image
        and not autosplit.settings_dict["split_hotkey"]
        and not autosplit.is_auto_controlled
    ):
        error_message = error_messages.load_start_image

    # If there is no reset hotkey set but a Reset Image is present, and is not auto controlled, throw an error.
    elif (
        reset_image
        and not autosplit.settings_dict["reset_hotkey"]
        and not autosplit.is_auto_controlled
    ):
        error_message = error_messages.reset_hotkey

    # Make sure that each of the images follows the guidelines for correct format
    # according to all of the settings selected by the user.
    else:
        for image in split_images:
            error_message = check_image(image)
            if error_message:
                break

    if error_message:
        # Validation failed: drop all images, reset the GUI and surface the error.
        autosplit.start_image = None
        autosplit.reset_image = None
        autosplit.split_images = []
        autosplit.gui_changes_on_reset()
        error_message()
        return False

    autosplit.start_image = start_image
    autosplit.reset_image = reset_image
    autosplit.split_images = split_images
    return True
diff --git a/src/user_profile.py b/src/user_profile.py
index dbb46aa1..c29220a0 100644
--- a/src/user_profile.py
+++ b/src/user_profile.py
@@ -1,217 +1,223 @@
-from __future__ import annotations
-
-import os
-from typing import TYPE_CHECKING, TypedDict, cast
-
-import toml
-from PySide6 import QtCore, QtWidgets
-
-import error_messages
-from capture_method import CAPTURE_METHODS, CaptureMethodEnum, Region, change_capture_method
-from gen import design
-from hotkeys import HOTKEYS, remove_all_hotkeys, set_hotkey
-from utils import auto_split_directory
-
-if TYPE_CHECKING:
- from AutoSplit import AutoSplit
-
-
-class UserProfileDict(TypedDict):
- split_hotkey: str
- reset_hotkey: str
- undo_split_hotkey: str
- skip_split_hotkey: str
- pause_hotkey: str
- screenshot_hotkey: str
- toggle_auto_reset_image_hotkey: str
- fps_limit: int
- live_capture_region: bool
- capture_method: str | CaptureMethodEnum
- capture_device_id: int
- capture_device_name: str
- default_comparison_method: int
- default_similarity_threshold: float
- default_delay_time: int
- default_pause_time: float
- loop_splits: bool
- start_also_resets: bool
- enable_auto_reset: bool
- split_image_directory: str
- screenshot_directory: str
- open_screenshot: bool
- captured_window_title: str
- capture_region: Region
-
-
-DEFAULT_PROFILE = UserProfileDict(
- split_hotkey="",
- reset_hotkey="",
- undo_split_hotkey="",
- skip_split_hotkey="",
- pause_hotkey="",
- screenshot_hotkey="",
- toggle_auto_reset_image_hotkey="",
- fps_limit=60,
- live_capture_region=True,
- capture_method=CAPTURE_METHODS.get_method_by_index(0),
- capture_device_id=0,
- capture_device_name="",
- default_comparison_method=0,
- default_similarity_threshold=0.95,
- default_delay_time=0,
- default_pause_time=10,
- loop_splits=False,
- start_also_resets=False,
- enable_auto_reset=True,
- split_image_directory="",
- screenshot_directory="",
- open_screenshot=True,
- captured_window_title="",
- capture_region=Region(x=0, y=0, width=1, height=1),
-)
-
-
-def have_settings_changed(autosplit: AutoSplit):
- return autosplit.settings_dict not in (autosplit.last_loaded_settings, autosplit.last_saved_settings)
-
-
-def save_settings(autosplit: AutoSplit):
- """@return: The save settings filepath. Or None if "Save Settings As" is cancelled."""
- return __save_settings_to_file(autosplit, autosplit.last_successfully_loaded_settings_file_path) \
- if autosplit.last_successfully_loaded_settings_file_path \
- else save_settings_as(autosplit)
-
-
-def save_settings_as(autosplit: AutoSplit):
- """@return: The save settings filepath selected. Empty if cancelled."""
- # User picks save destination
- save_settings_file_path = QtWidgets.QFileDialog.getSaveFileName(
- autosplit,
- "Save Settings As",
- autosplit.last_successfully_loaded_settings_file_path
- or os.path.join(auto_split_directory, "settings.toml"),
- "TOML (*.toml)",
- )[0]
- # If user cancels save destination window, don't save settings
- if not save_settings_file_path:
- return ""
-
- return __save_settings_to_file(autosplit, save_settings_file_path)
-
-
-def __save_settings_to_file(autosplit: AutoSplit, save_settings_file_path: str):
- autosplit.last_saved_settings = autosplit.settings_dict
- # Save settings to a .toml file
- with open(save_settings_file_path, "w", encoding="utf-8") as file:
- toml.dump(autosplit.last_saved_settings, file)
- autosplit.last_successfully_loaded_settings_file_path = save_settings_file_path
- return save_settings_file_path
-
-
-def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str):
- if load_settings_file_path.endswith(".pkl"):
- autosplit.show_error_signal.emit(error_messages.old_version_settings_file)
- return False
- try:
- with open(load_settings_file_path, encoding="utf-8") as file:
- # Casting here just so we can build an actual UserProfileDict once we're done validating
- # Fallback to default settings if some are missing from the file. This happens when new settings are added.
- loaded_settings = cast(
- UserProfileDict,
- {
- **DEFAULT_PROFILE,
- **toml.load(file),
- },
- )
- # TODO: Data Validation / fallbacks ?
- autosplit.settings_dict = UserProfileDict(**loaded_settings)
- autosplit.last_loaded_settings = autosplit.settings_dict
-
- autosplit.x_spinbox.setValue(autosplit.settings_dict["capture_region"]["x"])
- autosplit.y_spinbox.setValue(autosplit.settings_dict["capture_region"]["y"])
- autosplit.width_spinbox.setValue(autosplit.settings_dict["capture_region"]["width"])
- autosplit.height_spinbox.setValue(autosplit.settings_dict["capture_region"]["height"])
- autosplit.split_image_folder_input.setText(autosplit.settings_dict["split_image_directory"])
- except (FileNotFoundError, MemoryError, TypeError, toml.TomlDecodeError):
- autosplit.show_error_signal.emit(error_messages.invalid_settings)
- return False
-
- remove_all_hotkeys()
- if not autosplit.is_auto_controlled:
- for hotkey, hotkey_name in [(hotkey, f"{hotkey}_hotkey") for hotkey in HOTKEYS]:
- hotkey_value = autosplit.settings_dict.get(hotkey_name)
- if hotkey_value:
- set_hotkey(autosplit, hotkey, hotkey_value)
-
- change_capture_method(cast(CaptureMethodEnum, autosplit.settings_dict["capture_method"]), autosplit)
- if autosplit.settings_dict["capture_method"] != CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
- autosplit.capture_method.recover_window(autosplit.settings_dict["captured_window_title"], autosplit)
- if not autosplit.capture_method.check_selected_region_exists(autosplit):
- autosplit.live_image.setText(
- "Reload settings after opening"
- + f"\n{autosplit.settings_dict['captured_window_title']!r}"
- + "\nto automatically load Capture Region",
- )
-
- return True
-
-
-def load_settings(autosplit: AutoSplit, from_path: str = ""):
- load_settings_file_path = from_path or QtWidgets.QFileDialog.getOpenFileName(
- autosplit,
- "Load Profile",
- os.path.join(auto_split_directory, "settings.toml"),
- "TOML (*.toml)",
- )[0]
- if not (load_settings_file_path and __load_settings_from_file(autosplit, load_settings_file_path)):
- return
-
- autosplit.last_successfully_loaded_settings_file_path = load_settings_file_path
- # TODO: Should this check be in `__load_start_image` ?
- if not autosplit.is_running:
- autosplit.load_start_image_signal.emit(False, True)
-
-
-def load_settings_on_open(autosplit: AutoSplit):
- settings_files = [
- file for file
- in os.listdir(auto_split_directory)
- if file.endswith(".toml")
- ]
-
- # Find all .tomls in AutoSplit folder, error if there is not exactly 1
- error = None
- if len(settings_files) < 1:
- error = error_messages.no_settings_file_on_open
- elif len(settings_files) > 1:
- error = error_messages.too_many_settings_files_on_open
- if error:
- change_capture_method(CAPTURE_METHODS.get_method_by_index(0), autosplit)
- error()
- return
-
- load_settings(autosplit, os.path.join(auto_split_directory, settings_files[0]))
-
-
-def load_check_for_updates_on_open(autosplit: AutoSplit):
- """
- Retrieve the "Check For Updates On Open" QSettings and set the checkbox state
- These are only global settings values. They are not *toml settings values.
- """
- # Type not infered by PySide6
- # TODO: Report this issue upstream
- value = cast(
- bool,
- QtCore
- .QSettings("AutoSplit", "Check For Updates On Open")
- .value("check_for_updates_on_open", True, type=bool),
- )
- autosplit.action_check_for_updates_on_open.setChecked(value)
-
-
-def set_check_for_updates_on_open(design_window: design.Ui_MainWindow, value: bool):
- """Sets the "Check For Updates On Open" QSettings value and the checkbox state."""
- design_window.action_check_for_updates_on_open.setChecked(value)
- QtCore \
- .QSettings("AutoSplit", "Check For Updates On Open") \
- .setValue("check_for_updates_on_open", value)
+import os
+from typing import TYPE_CHECKING, TypedDict, cast
+
+import toml
+from PySide6 import QtCore, QtWidgets
+
+import error_messages
+from capture_method import CAPTURE_METHODS, CaptureMethodEnum, Region, change_capture_method
+from gen import design
+from hotkeys import HOTKEYS, remove_all_hotkeys, set_hotkey
+from utils import auto_split_directory
+
+if TYPE_CHECKING:
+ from AutoSplit import AutoSplit
+
+
class UserProfileDict(TypedDict):
    """Schema of a user profile as persisted to and loaded from a .toml settings file."""

    # Hotkey bindings (empty string when unbound)
    split_hotkey: str
    reset_hotkey: str
    undo_split_hotkey: str
    skip_split_hotkey: str
    pause_hotkey: str
    screenshot_hotkey: str
    toggle_auto_reset_image_hotkey: str
    # Capture configuration
    fps_limit: int
    live_capture_region: bool
    capture_method: str | CaptureMethodEnum
    capture_device_id: int
    capture_device_name: str
    # Per-image defaults, used when the filename doesn't override them
    default_comparison_method: int
    default_similarity_threshold: float
    default_delay_time: int
    default_pause_time: float
    # Run behavior toggles
    loop_splits: bool
    start_also_resets: bool
    enable_auto_reset: bool
    # Paths and capture target
    split_image_directory: str
    screenshot_directory: str
    open_screenshot: bool
    captured_window_title: str
    capture_region: Region
+
+
# Default values for every profile key. Merged *under* user-provided settings
# when loading a file (see __load_settings_from_file), so profiles saved by
# older versions that miss newer keys still load cleanly.
DEFAULT_PROFILE = UserProfileDict(
    split_hotkey="",
    reset_hotkey="",
    undo_split_hotkey="",
    skip_split_hotkey="",
    pause_hotkey="",
    screenshot_hotkey="",
    toggle_auto_reset_image_hotkey="",
    fps_limit=60,
    live_capture_region=True,
    capture_method=CAPTURE_METHODS.get_method_by_index(0),
    capture_device_id=0,
    capture_device_name="",
    default_comparison_method=0,
    default_similarity_threshold=0.95,
    default_delay_time=0,
    default_pause_time=10,
    loop_splits=False,
    start_also_resets=False,
    enable_auto_reset=True,
    split_image_directory="",
    screenshot_directory="",
    open_screenshot=True,
    captured_window_title="",
    capture_region=Region(x=0, y=0, width=1, height=1),
)
+
+
def have_settings_changed(autosplit: "AutoSplit"):
    """Whether the active settings differ from the last saved or the last loaded profile."""
    current = autosplit.settings_dict
    return current != autosplit.last_saved_settings or current != autosplit.last_loaded_settings
+
+
def save_settings(autosplit: "AutoSplit"):
    """@return: The save settings filepath. Or None if "Save Settings As" is cancelled."""
    last_path = autosplit.last_successfully_loaded_settings_file_path
    if last_path:
        # A profile was already loaded/saved: overwrite it in place.
        return __save_settings_to_file(autosplit, last_path)
    # No known profile path yet: fall back to the "Save Settings As" dialog.
    return save_settings_as(autosplit)
+
+
def save_settings_as(autosplit: "AutoSplit"):
    """@return: The save settings filepath selected. Empty if cancelled."""
    # Suggest the last profile location, falling back to the AutoSplit folder.
    suggested_path = (
        autosplit.last_successfully_loaded_settings_file_path
        or os.path.join(auto_split_directory, "settings.toml")
    )
    # User picks save destination
    save_settings_file_path, _selected_filter = QtWidgets.QFileDialog.getSaveFileName(
        autosplit,
        "Save Settings As",
        suggested_path,
        "TOML (*.toml)",
    )
    # If user cancels save destination window, don't save settings
    if not save_settings_file_path:
        return ""

    return __save_settings_to_file(autosplit, save_settings_file_path)
+
+
def __save_settings_to_file(autosplit: "AutoSplit", save_settings_file_path: str):
    """
    Write the current settings to *save_settings_file_path* as TOML and remember
    it as the active profile path.

    @param autosplit: The main application window holding the settings to save
    @param save_settings_file_path: Destination .toml file path
    @return: The path that was written to
    """
    settings = autosplit.settings_dict
    autosplit.last_saved_settings = settings
    # Save settings to a .toml file
    with open(save_settings_file_path, mode="w", encoding="utf-8") as file:
        toml.dump(settings, file)
    autosplit.last_successfully_loaded_settings_file_path = save_settings_file_path
    return save_settings_file_path
+
+
def __load_settings_from_file(autosplit: "AutoSplit", load_settings_file_path: str):
    """
    Load a .toml profile into *autosplit*: settings dict, GUI spinboxes/inputs,
    hotkeys and the capture method/window.

    @param autosplit: The main application window to apply the profile to
    @param load_settings_file_path: Path of the profile file to read
    @return: True on success; False for legacy .pkl files or unreadable/invalid files
    """
    # .pkl was the settings format of older versions; it is no longer supported.
    if load_settings_file_path.endswith(".pkl"):
        autosplit.show_error_signal.emit(error_messages.old_version_settings_file)
        return False
    try:
        with open(load_settings_file_path, encoding="utf-8") as file:
            # Casting here just so we can build an actual UserProfileDict once we're done validating
            # Fallback to default settings if some are missing from the file. This happens when new settings are added.
            loaded_settings = cast(
                UserProfileDict,
                {
                    **DEFAULT_PROFILE,
                    **toml.load(file),
                },
            )
            # TODO: Data Validation / fallbacks ?
            autosplit.settings_dict = UserProfileDict(**loaded_settings)
            autosplit.last_loaded_settings = autosplit.settings_dict

            # Reflect the loaded capture region and image folder in the GUI widgets.
            autosplit.x_spinbox.setValue(autosplit.settings_dict["capture_region"]["x"])
            autosplit.y_spinbox.setValue(autosplit.settings_dict["capture_region"]["y"])
            autosplit.width_spinbox.setValue(autosplit.settings_dict["capture_region"]["width"])
            autosplit.height_spinbox.setValue(autosplit.settings_dict["capture_region"]["height"])
            autosplit.split_image_folder_input.setText(autosplit.settings_dict["split_image_directory"])
    except (FileNotFoundError, MemoryError, TypeError, toml.TomlDecodeError):
        autosplit.show_error_signal.emit(error_messages.invalid_settings)
        return False

    # Re-register every hotkey from the freshly loaded profile.
    remove_all_hotkeys()
    if not autosplit.is_auto_controlled:
        for hotkey, hotkey_name in [(hotkey, f"{hotkey}_hotkey") for hotkey in HOTKEYS]:
            hotkey_value = autosplit.settings_dict.get(hotkey_name)
            if hotkey_value:
                set_hotkey(autosplit, hotkey, hotkey_value)

    # Switch to the profile's capture method, then try to re-attach to the
    # previously captured window (not applicable for video capture devices).
    change_capture_method(cast(CaptureMethodEnum, autosplit.settings_dict["capture_method"]), autosplit)
    if autosplit.settings_dict["capture_method"] != CaptureMethodEnum.VIDEO_CAPTURE_DEVICE:
        autosplit.capture_method.recover_window(autosplit.settings_dict["captured_window_title"], autosplit)
        if not autosplit.capture_method.check_selected_region_exists(autosplit):
            autosplit.live_image.setText(
                "Reload settings after opening"
                + f"\n{autosplit.settings_dict['captured_window_title']!r}"
                + "\nto automatically load Capture Region",
            )

    return True
+
+
def load_settings(autosplit: "AutoSplit", from_path: str = ""):
    """
    Load a profile from *from_path*, or prompt the user to pick one when not given.

    @param autosplit: The main application window to apply the profile to
    @param from_path: Optional explicit profile path; empty string opens a file dialog
    """
    if from_path:
        load_settings_file_path = from_path
    else:
        # Let the user pick a profile; an empty string means the dialog was cancelled.
        load_settings_file_path = QtWidgets.QFileDialog.getOpenFileName(
            autosplit,
            "Load Profile",
            os.path.join(auto_split_directory, "settings.toml"),
            "TOML (*.toml)",
        )[0]
    if not load_settings_file_path:
        return
    if not __load_settings_from_file(autosplit, load_settings_file_path):
        return

    autosplit.last_successfully_loaded_settings_file_path = load_settings_file_path
    # TODO: Should this check be in `__load_start_image` ?
    if not autosplit.is_running:
        autosplit.load_start_image_signal.emit(False, True)
+
+
def load_settings_on_open(autosplit: "AutoSplit"):
    """
    On startup, load the single .toml profile found in the AutoSplit folder.

    Errors out (and falls back to the default capture method) when the folder
    contains zero or more than one .toml file.
    """
    settings_files = [
        file
        for file in os.listdir(auto_split_directory)
        if file.endswith(".toml")
    ]

    # Find all .tomls in AutoSplit folder, error if there is not exactly 1
    if len(settings_files) == 1:
        load_settings(autosplit, os.path.join(auto_split_directory, settings_files[0]))
        return

    error = (
        error_messages.no_settings_file_on_open
        if not settings_files
        else error_messages.too_many_settings_files_on_open
    )
    change_capture_method(CAPTURE_METHODS.get_method_by_index(0), autosplit)
    error()
+
+
def load_check_for_updates_on_open(autosplit: "AutoSplit"):
    """
    Retrieve the "Check For Updates On Open" QSettings and set the checkbox state
    These are only global settings values. They are not *toml settings values.
    """
    qsettings = QtCore.QSettings("AutoSplit", "Check For Updates On Open")
    # Type not infered by PySide6
    # TODO: Report this issue upstream
    should_check = cast(
        bool,
        qsettings.value("check_for_updates_on_open", True, type=bool),
    )
    autosplit.action_check_for_updates_on_open.setChecked(should_check)
+
+
def set_check_for_updates_on_open(design_window: design.Ui_MainWindow, value: bool):
    """Sets the "Check For Updates On Open" QSettings value and the checkbox state."""
    # Keep the menu checkbox and the persisted global setting in sync.
    design_window.action_check_for_updates_on_open.setChecked(value)
    qsettings = QtCore.QSettings("AutoSplit", "Check For Updates On Open")
    qsettings.setValue("check_for_updates_on_open", value)
diff --git a/src/utils.py b/src/utils.py
index 1851cdec..0e9338b4 100644
--- a/src/utils.py
+++ b/src/utils.py
@@ -1,192 +1,189 @@
-from __future__ import annotations
-
-import asyncio
-import ctypes
-import ctypes.wintypes
-import os
-import sys
-from collections.abc import Callable, Iterable
-from enum import IntEnum
-from itertools import chain
-from platform import version
-from threading import Thread
-from typing import TYPE_CHECKING, Any, TypeVar
-
-import win32ui
-from cv2.typing import MatLike
-from typing_extensions import TypeGuard
-from win32 import win32gui
-from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind
-from winsdk.windows.media.capture import MediaCapture
-
-from gen.build_vars import AUTOSPLIT_BUILD_NUMBER, AUTOSPLIT_GITHUB_REPOSITORY
-
-if TYPE_CHECKING:
- # Source does not exist, keep this under TYPE_CHECKING
- from _win32typing import PyCDC # pyright: ignore[reportMissingModuleSource]
-
-_T = TypeVar("_T")
-
-
-DWMWA_EXTENDED_FRAME_BOUNDS = 9
-MAXBYTE = 255
-BGR_CHANNEL_COUNT = 3
-"""How many channels in an RGB image"""
-BGRA_CHANNEL_COUNT = 4
-"""How many channels in an RGBA image"""
-
-
-class ImageShape(IntEnum):
- Y = 0
- X = 1
- Channels = 2
-
-
-class ColorChannel(IntEnum):
- Blue = 0
- Green = 1
- Red = 2
- Alpha = 3
-
-
-def decimal(value: float):
- # Using ljust instead of :2f because of python float rounding errors
- return f"{int(value * 100) / 100}".ljust(4, "0")
-
-
-def is_digit(value: str | int | None):
- """Checks if `value` is a single-digit string from 0-9."""
- if value is None:
- return False
- try:
- return 0 <= int(value) <= 9 # noqa: PLR2004
- except (ValueError, TypeError):
- return False
-
-
-def is_valid_image(image: MatLike | None) -> TypeGuard[MatLike]:
- return image is not None and bool(image.size)
-
-
-def is_valid_hwnd(hwnd: int) -> bool:
- """Validate the hwnd points to a valid window and not the desktop or whatever window obtained with `""`."""
- if not hwnd:
- return False
- if sys.platform == "win32":
- return bool(win32gui.IsWindow(hwnd) and win32gui.GetWindowText(hwnd))
- return True
-
-
-T = TypeVar("T")
-
-
-def first(iterable: Iterable[T]) -> T:
- """@return: The first element of a collection. Dictionaries will return the first key."""
- return next(iter(iterable))
-
-
-def try_delete_dc(dc: PyCDC):
- try:
- dc.DeleteDC()
- except win32ui.error:
- pass
-
-
-def get_window_bounds(hwnd: int) -> tuple[int, int, int, int]:
- extended_frame_bounds = ctypes.wintypes.RECT()
- ctypes.windll.dwmapi.DwmGetWindowAttribute(
- hwnd,
- DWMWA_EXTENDED_FRAME_BOUNDS,
- ctypes.byref(extended_frame_bounds),
- ctypes.sizeof(extended_frame_bounds),
- )
-
- window_rect = win32gui.GetWindowRect(hwnd)
- window_left_bounds = extended_frame_bounds.left - window_rect[0]
- window_top_bounds = extended_frame_bounds.top - window_rect[1]
- window_width = extended_frame_bounds.right - extended_frame_bounds.left
- window_height = extended_frame_bounds.bottom - extended_frame_bounds.top
- return window_left_bounds, window_top_bounds, window_width, window_height
-
-
-def open_file(file_path: str | bytes | os.PathLike[str] | os.PathLike[bytes]):
- os.startfile(file_path) # noqa: S606
-
-
-def get_or_create_eventloop():
- try:
- return asyncio.get_event_loop()
- except RuntimeError:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- return asyncio.get_event_loop()
-
-
-def get_direct3d_device():
- # Note: Must create in the same thread (can't use a global) otherwise when ran from LiveSplit it will raise:
- # OSError: The application called an interface that was marshalled for a different thread
- media_capture = MediaCapture()
-
- async def init_mediacapture():
- await (media_capture.initialize_async() or asyncio.sleep(0))
-
- asyncio.run(init_mediacapture())
- direct_3d_device = media_capture.media_capture_settings and media_capture.media_capture_settings.direct3_d11_device
- if not direct_3d_device:
- try:
- # May be problematic? https://github.com/pywinrt/python-winsdk/issues/11#issuecomment-1315345318
- direct_3d_device = LearningModelDevice(LearningModelDeviceKind.DIRECT_X_HIGH_PERFORMANCE).direct3_d11_device
- # TODO: Unknown potential error, I don't have an older Win10 machine to test.
- except BaseException: # noqa: S110,BLE001
- pass
- if not direct_3d_device:
- raise OSError("Unable to initialize a Direct3D Device.")
- return direct_3d_device
-
-
-def try_get_direct3d_device():
- try:
- return get_direct3d_device()
- except OSError:
- return None
-
-
-def fire_and_forget(func: Callable[..., Any]):
- """
- Runs synchronous function asynchronously without waiting for a response.
-
- Uses threads on Windows because ~~`RuntimeError: There is no current event loop in thread 'MainThread'.`~~
- Because maybe asyncio has issues. Unsure. See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36
-
- Uses asyncio on Linux because of a `Segmentation fault (core dumped)`
- """
-
- def wrapped(*args: Any, **kwargs: Any):
- if sys.platform == "win32":
- thread = Thread(target=func, args=args, kwargs=kwargs)
- thread.start()
- return thread
- return get_or_create_eventloop().run_in_executor(None, func, *args, *kwargs)
-
- return wrapped
-
-
-def flatten(nested_iterable: Iterable[Iterable[_T]]) -> chain[_T]:
- return chain(*nested_iterable)
-
-
-# Environment specifics
-WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1
-FIRST_WIN_11_BUILD = 22000
-"""AutoSplit Version number"""
-WGC_MIN_BUILD = 17134
-"""https://docs.microsoft.com/en-us/uwp/api/windows.graphics.capture.graphicscapturepicker#applies-to"""
-FROZEN = hasattr(sys, "frozen")
-"""Running from build made by PyInstaller"""
-auto_split_directory = os.path.dirname(sys.executable if FROZEN else os.path.abspath(__file__))
-"""The directory of either the AutoSplit executable or AutoSplit.py"""
-
-# Shared strings
-# Check `excludeBuildNumber` during workflow dispatch build generate a clean version number
-AUTOSPLIT_VERSION = "2.2.0" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "")
-GITHUB_REPOSITORY = AUTOSPLIT_GITHUB_REPOSITORY
+import asyncio
+import ctypes
+import ctypes.wintypes
+import os
+import sys
+from collections.abc import Callable, Iterable
+from enum import IntEnum
+from itertools import chain
+from platform import version
+from threading import Thread
+from typing import TYPE_CHECKING, Any, TypeGuard, TypeVar
+
+import win32ui
+from cv2.typing import MatLike
+from win32 import win32gui
+from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind
+from winsdk.windows.media.capture import MediaCapture
+
+from gen.build_vars import AUTOSPLIT_BUILD_NUMBER, AUTOSPLIT_GITHUB_REPOSITORY
+
+if TYPE_CHECKING:
+ # Source does not exist, keep this under TYPE_CHECKING
+ from _win32typing import PyCDC # pyright: ignore[reportMissingModuleSource]
+
+_T = TypeVar("_T")
+
+
+DWMWA_EXTENDED_FRAME_BOUNDS = 9
+MAXBYTE = 255
+BGR_CHANNEL_COUNT = 3
+"""How many channels in a BGR image"""
+BGRA_CHANNEL_COUNT = 4
+"""How many channels in a BGRA image"""
+
+
+class ImageShape(IntEnum):
+ Y = 0
+ X = 1
+ Channels = 2
+
+
+class ColorChannel(IntEnum):
+ Blue = 0
+ Green = 1
+ Red = 2
+ Alpha = 3
+
+
+def decimal(value: float):
+ # Using ljust instead of :2f because of python float rounding errors
+ return f"{int(value * 100) / 100}".ljust(4, "0")
+
+
+def is_digit(value: str | int | None):
+ """Checks if `value` is a single-digit string from 0-9."""
+ if value is None:
+ return False
+ try:
+ return 0 <= int(value) <= 9 # noqa: PLR2004
+ except (ValueError, TypeError):
+ return False
+
+
+def is_valid_image(image: MatLike | None) -> TypeGuard[MatLike]:
+ return image is not None and bool(image.size)
+
+
+def is_valid_hwnd(hwnd: int) -> bool:
+ """Validate the hwnd points to a valid window and not the desktop or whatever window obtained with `""`."""
+ if not hwnd:
+ return False
+ if sys.platform == "win32":
+ return bool(win32gui.IsWindow(hwnd) and win32gui.GetWindowText(hwnd))
+ return True
+
+
+T = TypeVar("T")
+
+
+def first(iterable: Iterable[T]) -> T:
+ """@return: The first element of a collection. Dictionaries will return the first key."""
+ return next(iter(iterable))
+
+
+def try_delete_dc(dc: "PyCDC"):
+ try:
+ dc.DeleteDC()
+ except win32ui.error:
+ pass
+
+
+def get_window_bounds(hwnd: int) -> tuple[int, int, int, int]:
+ extended_frame_bounds = ctypes.wintypes.RECT()
+ ctypes.windll.dwmapi.DwmGetWindowAttribute(
+ hwnd,
+ DWMWA_EXTENDED_FRAME_BOUNDS,
+ ctypes.byref(extended_frame_bounds),
+ ctypes.sizeof(extended_frame_bounds),
+ )
+
+ window_rect = win32gui.GetWindowRect(hwnd)
+ window_left_bounds = extended_frame_bounds.left - window_rect[0]
+ window_top_bounds = extended_frame_bounds.top - window_rect[1]
+ window_width = extended_frame_bounds.right - extended_frame_bounds.left
+ window_height = extended_frame_bounds.bottom - extended_frame_bounds.top
+ return window_left_bounds, window_top_bounds, window_width, window_height
+
+
+def open_file(file_path: str | bytes | os.PathLike[str] | os.PathLike[bytes]):
+ os.startfile(file_path) # noqa: S606
+
+
+def get_or_create_eventloop():
+ try:
+ return asyncio.get_event_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ return asyncio.get_event_loop()
+
+
+def get_direct3d_device():
+    # Note: Must create in the same thread (can't use a global) otherwise when run from LiveSplit it will raise:
+ # OSError: The application called an interface that was marshalled for a different thread
+ media_capture = MediaCapture()
+
+ async def init_mediacapture():
+ await (media_capture.initialize_async() or asyncio.sleep(0))
+
+ asyncio.run(init_mediacapture())
+ direct_3d_device = media_capture.media_capture_settings and media_capture.media_capture_settings.direct3_d11_device
+ if not direct_3d_device:
+ try:
+ # May be problematic? https://github.com/pywinrt/python-winsdk/issues/11#issuecomment-1315345318
+ direct_3d_device = LearningModelDevice(LearningModelDeviceKind.DIRECT_X_HIGH_PERFORMANCE).direct3_d11_device
+ # TODO: Unknown potential error, I don't have an older Win10 machine to test.
+ except BaseException: # noqa: S110,BLE001
+ pass
+ if not direct_3d_device:
+ raise OSError("Unable to initialize a Direct3D Device.")
+ return direct_3d_device
+
+
+def try_get_direct3d_device():
+ try:
+ return get_direct3d_device()
+ except OSError:
+ return None
+
+
+def fire_and_forget(func: Callable[..., Any]):
+ """
+ Runs synchronous function asynchronously without waiting for a response.
+
+ Uses threads on Windows because ~~`RuntimeError: There is no current event loop in thread 'MainThread'.`~~
+ Because maybe asyncio has issues. Unsure. See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36
+
+ Uses asyncio on Linux because of a `Segmentation fault (core dumped)`
+ """
+
+ def wrapped(*args: Any, **kwargs: Any):
+ if sys.platform == "win32":
+ thread = Thread(target=func, args=args, kwargs=kwargs)
+ thread.start()
+ return thread
+ return get_or_create_eventloop().run_in_executor(None, func, *args, *kwargs)
+
+ return wrapped
+
+
+def flatten(nested_iterable: Iterable[Iterable[_T]]) -> chain[_T]:
+ return chain(*nested_iterable)
+
+
+# Environment specifics
+WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1
+FIRST_WIN_11_BUILD = 22000
+"""The first build number for Windows 11"""
+WGC_MIN_BUILD = 17134
+"""https://docs.microsoft.com/en-us/uwp/api/windows.graphics.capture.graphicscapturepicker#applies-to"""
+FROZEN = hasattr(sys, "frozen")
+"""Running from build made by PyInstaller"""
+auto_split_directory = os.path.dirname(sys.executable if FROZEN else os.path.abspath(__file__))
+"""The directory of either the AutoSplit executable or AutoSplit.py"""
+
+# Shared strings
+# Check `excludeBuildNumber` during workflow dispatch build to generate a clean version number
+AUTOSPLIT_VERSION = "2.2.0" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "")
+GITHUB_REPOSITORY = AUTOSPLIT_GITHUB_REPOSITORY
diff --git a/typings/cv2/__init__.pyi b/typings/cv2/__init__.pyi
index 4fa2b158..28ef8445 100644
--- a/typings/cv2/__init__.pyi
+++ b/typings/cv2/__init__.pyi
@@ -1622,7 +1622,6 @@ MediaFormat = int
"""One of [MediaFormat_BGR, MEDIA_FORMAT_BGR, MediaFormat_NV12, MEDIA_FORMAT_NV12, MediaFormat_GRAY,
MEDIA_FORMAT_GRAY]"""
-
FileStorage_READ: int
FILE_STORAGE_READ: int
FileStorage_WRITE: int
@@ -2180,32 +2179,23 @@ RMAT_ACCESS_W: int
RMat_Access = int
"""One of [RMat_Access_R, RMAT_ACCESS_R, RMat_Access_W, RMAT_ACCESS_W]"""
-
# Classes
class Algorithm:
# Functions
def clear(self) -> None: ...
-
@typing.overload
def write(self, fs: FileStorage) -> None: ...
@typing.overload
def write(self, fs: FileStorage, name: str) -> None: ...
-
def read(self, fn: FileNode) -> None: ...
-
def empty(self) -> bool: ...
-
def save(self, filename: str) -> None: ...
-
def getDefaultName(self) -> str: ...
-
class AsyncArray:
# Functions
def __init__(self) -> None: ...
-
def release(self) -> None: ...
-
@typing.overload
def get(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
@@ -2214,33 +2204,22 @@ class AsyncArray:
def get(self, timeoutNs: float, dst: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@typing.overload
def get(self, timeoutNs: float, dst: UMat | None = ...) -> tuple[bool, UMat]: ...
-
def wait_for(self, timeoutNs: float) -> bool: ...
-
def valid(self) -> bool: ...
-
class FileStorage:
# Functions
@typing.overload
def __init__(self) -> None: ...
@typing.overload
def __init__(self, filename: str, flags: int, encoding: str = ...) -> None: ...
-
def open(self, filename: str, flags: int, encoding: str = ...) -> bool: ...
-
def isOpened(self) -> bool: ...
-
def release(self) -> None: ...
-
def releaseAndGetString(self) -> str: ...
-
def getFirstTopLevelNode(self) -> FileNode: ...
-
def root(self, streamidx: int = ...) -> FileNode: ...
-
def getNode(self, nodename: str) -> FileNode: ...
-
@typing.overload
def write(self, name: str, val: int) -> None: ...
@typing.overload
@@ -2251,57 +2230,33 @@ class FileStorage:
def write(self, name: str, val: cv2.typing.MatLike) -> None: ...
@typing.overload
def write(self, name: str, val: typing.Sequence[str]) -> None: ...
-
def writeComment(self, comment: str, append: bool = ...) -> None: ...
-
def startWriteStruct(self, name: str, flags: int, typeName: str = ...) -> None: ...
-
def endWriteStruct(self) -> None: ...
-
def getFormat(self) -> int: ...
-
class FileNode:
# Functions
def __init__(self) -> None: ...
-
def getNode(self, nodename: str) -> FileNode: ...
-
def at(self, i: int) -> FileNode: ...
-
def keys(self) -> typing.Sequence[str]: ...
-
def type(self) -> int: ...
-
def empty(self) -> bool: ...
-
def isNone(self) -> bool: ...
-
def isSeq(self) -> bool: ...
-
def isMap(self) -> bool: ...
-
def isInt(self) -> bool: ...
-
def isReal(self) -> bool: ...
-
def isString(self) -> bool: ...
-
def isNamed(self) -> bool: ...
-
def name(self) -> str: ...
-
def size(self) -> int: ...
-
def rawSize(self) -> int: ...
-
def real(self) -> float: ...
-
def string(self) -> str: ...
-
def mat(self) -> cv2.typing.MatLike: ...
-
class RotatedRect:
center: cv2.typing.Point2f
size: cv2.typing.Size2f
@@ -2314,12 +2269,9 @@ class RotatedRect:
def __init__(self, center: cv2.typing.Point2f, size: cv2.typing.Size2f, angle: float) -> None: ...
@typing.overload
def __init__(self, point1: cv2.typing.Point2f, point2: cv2.typing.Point2f, point3: cv2.typing.Point2f) -> None: ...
-
def points(self) -> typing.Sequence[cv2.typing.Point2f]: ...
-
def boundingRect(self) -> cv2.typing.Rect: ...
-
class KeyPoint:
pt: cv2.typing.Point2f
size: float
@@ -2331,7 +2283,6 @@ class KeyPoint:
# Functions
@typing.overload
def __init__(self) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2343,14 +2294,12 @@ class KeyPoint:
octave: int = ...,
class_id: int = ...,
) -> None: ...
-
@staticmethod
@typing.overload
def convert(
- keypoints: typing.Sequence[KeyPoint], keypointIndexes: typing.Sequence[int]
- = ...,
+ keypoints: typing.Sequence[KeyPoint],
+ keypointIndexes: typing.Sequence[int] = ...,
) -> typing.Sequence[cv2.typing.Point2f]: ...
-
@staticmethod
@typing.overload
def convert(
@@ -2360,11 +2309,9 @@ class KeyPoint:
octave: int = ...,
class_id: int = ...,
) -> typing.Sequence[KeyPoint]: ...
-
@staticmethod
def overlap(kp1: KeyPoint, kp2: KeyPoint) -> float: ...
-
class DMatch:
queryIdx: int
trainIdx: int
@@ -2379,34 +2326,21 @@ class DMatch:
@typing.overload
def __init__(self, _queryIdx: int, _trainIdx: int, _imgIdx: int, _distance: float) -> None: ...
-
class TickMeter:
# Functions
def __init__(self) -> None: ...
-
def start(self) -> None: ...
-
def stop(self) -> None: ...
-
def getTimeTicks(self) -> int: ...
-
def getTimeMicro(self) -> float: ...
-
def getTimeMilli(self) -> float: ...
-
def getTimeSec(self) -> float: ...
-
def getCounter(self) -> int: ...
-
def getFPS(self) -> float: ...
-
def getAvgTimeSec(self) -> float: ...
-
def getAvgTimeMilli(self) -> float: ...
-
def reset(self) -> None: ...
-
class UMat:
offset: int
@@ -2417,7 +2351,6 @@ class UMat:
def __init__(self, rows: int, cols: int, type: int, usageFlags: UMatUsageFlags = ...) -> None: ...
@typing.overload
def __init__(self, size: cv2.typing.Size, type: int, usageFlags: UMatUsageFlags = ...) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2427,7 +2360,6 @@ class UMat:
s: cv2.typing.Scalar,
usageFlags: UMatUsageFlags = ...,
) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2436,7 +2368,6 @@ class UMat:
s: cv2.typing.Scalar,
usageFlags: UMatUsageFlags = ...,
) -> None: ...
-
@typing.overload
def __init__(self, m: UMat) -> None: ...
@typing.overload
@@ -2445,46 +2376,31 @@ class UMat:
def __init__(self, m: UMat, roi: cv2.typing.Rect) -> None: ...
@typing.overload
def __init__(self, m: UMat, ranges: typing.Sequence[cv2.typing.Range]) -> None: ...
-
@staticmethod
def queue() -> cv2.typing.IntPointer: ...
-
@staticmethod
def context() -> cv2.typing.IntPointer: ...
-
def get(self) -> cv2.typing.MatLike: ...
-
def isContinuous(self) -> bool: ...
-
def isSubmatrix(self) -> bool: ...
-
def handle(self, accessFlags: AccessFlag) -> cv2.typing.IntPointer: ...
-
class Subdiv2D:
# Functions
@typing.overload
def __init__(self) -> None: ...
@typing.overload
def __init__(self, rect: cv2.typing.Rect) -> None: ...
-
def initDelaunay(self, rect: cv2.typing.Rect) -> None: ...
-
@typing.overload
def insert(self, pt: cv2.typing.Point2f) -> int: ...
@typing.overload
def insert(self, ptvec: typing.Sequence[cv2.typing.Point2f]) -> None: ...
-
def locate(self, pt: cv2.typing.Point2f) -> tuple[int, int, int]: ...
-
def findNearest(self, pt: cv2.typing.Point2f) -> tuple[int, cv2.typing.Point2f]: ...
-
def getEdgeList(self) -> typing.Sequence[cv2.typing.Vec4f]: ...
-
def getLeadingEdgeList(self) -> typing.Sequence[int]: ...
-
def getTriangleList(self) -> typing.Sequence[cv2.typing.Vec6f]: ...
-
def getVoronoiFacetList(
self,
idx: typing.Sequence[int],
@@ -2492,42 +2408,32 @@ class Subdiv2D:
typing.Sequence[typing.Sequence[cv2.typing.Point2f]],
typing.Sequence[cv2.typing.Point2f],
]: ...
-
def getVertex(self, vertex: int) -> tuple[cv2.typing.Point2f, int]: ...
-
def getEdge(self, edge: int, nextEdgeType: int) -> int: ...
-
def nextEdge(self, edge: int) -> int: ...
-
def rotateEdge(self, edge: int, rotate: int) -> int: ...
-
def symEdge(self, edge: int) -> int: ...
-
def edgeOrg(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ...
-
def edgeDst(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ...
-
class Feature2D:
# Functions
@typing.overload
def detect(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> typing.Sequence[KeyPoint]: ...
@typing.overload
def detect(self, image: UMat, mask: UMat | None = ...) -> typing.Sequence[KeyPoint]: ...
-
@typing.overload
def detect(
self,
images: typing.Sequence[cv2.typing.MatLike],
masks: typing.Sequence[cv2.typing.MatLike] | None = ...,
) -> typing.Sequence[typing.Sequence[KeyPoint]]: ...
-
@typing.overload
def detect(
- self, images: typing.Sequence[UMat], masks: typing.Sequence[UMat]
- | None = ...,
+ self,
+ images: typing.Sequence[UMat],
+ masks: typing.Sequence[UMat] | None = ...,
) -> typing.Sequence[typing.Sequence[KeyPoint]]: ...
-
@typing.overload
def compute(
self,
@@ -2538,7 +2444,6 @@ class Feature2D:
typing.Sequence[KeyPoint],
cv2.typing.MatLike,
]: ...
-
@typing.overload
def compute(
self,
@@ -2549,7 +2454,6 @@ class Feature2D:
typing.Sequence[KeyPoint],
UMat,
]: ...
-
@typing.overload
def compute(
self,
@@ -2560,7 +2464,6 @@ class Feature2D:
typing.Sequence[typing.Sequence[KeyPoint]],
typing.Sequence[cv2.typing.MatLike],
]: ...
-
@typing.overload
def compute(
self,
@@ -2571,7 +2474,6 @@ class Feature2D:
typing.Sequence[typing.Sequence[KeyPoint]],
typing.Sequence[UMat],
]: ...
-
@typing.overload
def detectAndCompute(
self,
@@ -2583,7 +2485,6 @@ class Feature2D:
typing.Sequence[KeyPoint],
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detectAndCompute(
self,
@@ -2595,64 +2496,45 @@ class Feature2D:
typing.Sequence[KeyPoint],
UMat,
]: ...
-
def descriptorSize(self) -> int: ...
-
def descriptorType(self) -> int: ...
-
def defaultNorm(self) -> int: ...
-
@typing.overload
def write(self, fileName: str) -> None: ...
@typing.overload
def write(self, fs: FileStorage, name: str) -> None: ...
-
@typing.overload
def read(self, fileName: str) -> None: ...
@typing.overload
def read(self, arg1: FileNode) -> None: ...
-
def empty(self) -> bool: ...
-
def getDefaultName(self) -> str: ...
-
class BOWTrainer:
# Functions
def add(self, descriptors: cv2.typing.MatLike) -> None: ...
-
def getDescriptors(self) -> typing.Sequence[cv2.typing.MatLike]: ...
-
def descriptorsCount(self) -> int: ...
-
def clear(self) -> None: ...
-
@typing.overload
def cluster(self) -> cv2.typing.MatLike: ...
@typing.overload
def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
-
class BOWImgDescriptorExtractor:
# Functions
def __init__(self, dextractor: cv2.typing.DescriptorExtractor, dmatcher: DescriptorMatcher) -> None: ...
-
def setVocabulary(self, vocabulary: cv2.typing.MatLike) -> None: ...
-
def getVocabulary(self) -> cv2.typing.MatLike: ...
-
def compute(
self,
image: cv2.typing.MatLike,
keypoints: typing.Sequence[KeyPoint],
imgDescriptor: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
def descriptorSize(self) -> int: ...
-
def descriptorType(self) -> int: ...
-
class VideoCapture:
# Functions
@typing.overload
@@ -2665,7 +2547,6 @@ class VideoCapture:
def __init__(self, index: int, apiPreference: int = ...) -> None: ...
@typing.overload
def __init__(self, index: int, apiPreference: int, params: typing.Sequence[int]) -> None: ...
-
@typing.overload
def open(self, filename: str, apiPreference: int = ...) -> bool: ...
@typing.overload
@@ -2674,42 +2555,29 @@ class VideoCapture:
def open(self, index: int, apiPreference: int = ...) -> bool: ...
@typing.overload
def open(self, index: int, apiPreference: int, params: typing.Sequence[int]) -> bool: ...
-
def isOpened(self) -> bool: ...
-
def release(self) -> None: ...
-
def grab(self) -> bool: ...
-
@typing.overload
def retrieve(self, image: cv2.typing.MatLike | None = ..., flag: int = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@typing.overload
def retrieve(self, image: UMat | None = ..., flag: int = ...) -> tuple[bool, UMat]: ...
-
@typing.overload
def read(self, image: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@typing.overload
def read(self, image: UMat | None = ...) -> tuple[bool, UMat]: ...
-
def set(self, propId: int, value: float) -> bool: ...
-
def get(self, propId: int) -> float: ...
-
def getBackendName(self) -> str: ...
-
def setExceptionMode(self, enable: bool) -> None: ...
-
def getExceptionMode(self) -> bool: ...
-
@staticmethod
def waitAny(streams: typing.Sequence[VideoCapture], timeoutNs: int = ...) -> tuple[bool, typing.Sequence[int]]: ...
-
class VideoWriter:
# Functions
@typing.overload
def __init__(self) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2719,7 +2587,6 @@ class VideoWriter:
frameSize: cv2.typing.Size,
isColor: bool = ...,
) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2730,7 +2597,6 @@ class VideoWriter:
frameSize: cv2.typing.Size,
isColor: bool = ...,
) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2740,22 +2606,28 @@ class VideoWriter:
frameSize: cv2.typing.Size,
params: typing.Sequence[int],
) -> None: ...
-
@typing.overload
def __init__(
- self, filename: str, apiPreference: int, fourcc: int, fps: float,
- frameSize: cv2.typing.Size, params: typing.Sequence[int],
+ self,
+ filename: str,
+ apiPreference: int,
+ fourcc: int,
+ fps: float,
+ frameSize: cv2.typing.Size,
+ params: typing.Sequence[int],
) -> None: ...
-
@typing.overload
def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ...
-
@typing.overload
def open(
- self, filename: str, apiPreference: int, fourcc: int, fps: float,
- frameSize: cv2.typing.Size, isColor: bool = ...,
+ self,
+ filename: str,
+ apiPreference: int,
+ fourcc: int,
+ fps: float,
+ frameSize: cv2.typing.Size,
+ isColor: bool = ...,
) -> bool: ...
-
@typing.overload
def open(
self,
@@ -2765,32 +2637,28 @@ class VideoWriter:
frameSize: cv2.typing.Size,
params: typing.Sequence[int],
) -> bool: ...
-
@typing.overload
def open(
- self, filename: str, apiPreference: int, fourcc: int, fps: float,
- frameSize: cv2.typing.Size, params: typing.Sequence[int],
+ self,
+ filename: str,
+ apiPreference: int,
+ fourcc: int,
+ fps: float,
+ frameSize: cv2.typing.Size,
+ params: typing.Sequence[int],
) -> bool: ...
-
def isOpened(self) -> bool: ...
-
def release(self) -> None: ...
-
@typing.overload
def write(self, image: cv2.typing.MatLike) -> None: ...
@typing.overload
def write(self, image: UMat) -> None: ...
-
def set(self, propId: int, value: float) -> bool: ...
-
def get(self, propId: int) -> float: ...
-
@staticmethod
def fourcc(c1: str, c2: str, c3: str, c4: str) -> int: ...
-
def getBackendName(self) -> str: ...
-
class UsacParams:
confidence: float
isParallel: bool
@@ -2809,7 +2677,6 @@ class UsacParams:
# Functions
def __init__(self) -> None: ...
-
class CirclesGridFinderParameters:
densityNeighborhoodSize: cv2.typing.Size2f
minDensity: float
@@ -2830,20 +2697,15 @@ class CirclesGridFinderParameters:
# Functions
def __init__(self) -> None: ...
-
class CascadeClassifier:
# Functions
@typing.overload
def __init__(self) -> None: ...
@typing.overload
def __init__(self, filename: str) -> None: ...
-
def empty(self) -> bool: ...
-
def load(self, filename: str) -> bool: ...
-
def read(self, node: FileNode) -> bool: ...
-
@typing.overload
def detectMultiScale(
self,
@@ -2854,7 +2716,6 @@ class CascadeClassifier:
minSize: cv2.typing.Size = ...,
maxSize: cv2.typing.Size = ...,
) -> typing.Sequence[cv2.typing.Rect]: ...
-
@typing.overload
def detectMultiScale(
self,
@@ -2865,7 +2726,6 @@ class CascadeClassifier:
minSize: cv2.typing.Size = ...,
maxSize: cv2.typing.Size = ...,
) -> typing.Sequence[cv2.typing.Rect]: ...
-
@typing.overload
def detectMultiScale2(
self,
@@ -2879,7 +2739,6 @@ class CascadeClassifier:
typing.Sequence[cv2.typing.Rect],
typing.Sequence[int],
]: ...
-
@typing.overload
def detectMultiScale2(
self,
@@ -2893,7 +2752,6 @@ class CascadeClassifier:
typing.Sequence[cv2.typing.Rect],
typing.Sequence[int],
]: ...
-
@typing.overload
def detectMultiScale3(
self,
@@ -2909,7 +2767,6 @@ class CascadeClassifier:
typing.Sequence[int],
typing.Sequence[float],
]: ...
-
@typing.overload
def detectMultiScale3(
self,
@@ -2925,17 +2782,12 @@ class CascadeClassifier:
typing.Sequence[int],
typing.Sequence[float],
]: ...
-
def isOldFormatCascade(self) -> bool: ...
-
def getOriginalWindowSize(self) -> cv2.typing.Size: ...
-
def getFeatureType(self) -> int: ...
-
@staticmethod
def convert(oldcascade: str, newcascade: str) -> bool: ...
-
class HOGDescriptor:
@property
def winSize(self) -> cv2.typing.Size: ...
@@ -2967,7 +2819,6 @@ class HOGDescriptor:
# Functions
@typing.overload
def __init__(self) -> None: ...
-
@typing.overload
def __init__(
self,
@@ -2984,37 +2835,33 @@ class HOGDescriptor:
_nlevels: int = ...,
_signedGradient: bool = ...,
) -> None: ...
-
@typing.overload
def __init__(self, filename: str) -> None: ...
-
def getDescriptorSize(self) -> int: ...
-
def checkDetectorSize(self) -> bool: ...
-
def getWinSigma(self) -> float: ...
-
@typing.overload
def setSVMDetector(self, svmdetector: cv2.typing.MatLike) -> None: ...
@typing.overload
def setSVMDetector(self, svmdetector: UMat) -> None: ...
-
def load(self, filename: str, objname: str = ...) -> bool: ...
-
def save(self, filename: str, objname: str = ...) -> None: ...
-
@typing.overload
def compute(
- self, img: cv2.typing.MatLike, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ...,
+ self,
+ img: cv2.typing.MatLike,
+ winStride: cv2.typing.Size = ...,
+ padding: cv2.typing.Size = ...,
locations: typing.Sequence[cv2.typing.Point] = ...,
) -> typing.Sequence[float]: ...
-
@typing.overload
def compute(
- self, img: UMat, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ...,
+ self,
+ img: UMat,
+ winStride: cv2.typing.Size = ...,
+ padding: cv2.typing.Size = ...,
locations: typing.Sequence[cv2.typing.Point] = ...,
) -> typing.Sequence[float]: ...
-
@typing.overload
def detect(
self,
@@ -3027,7 +2874,6 @@ class HOGDescriptor:
typing.Sequence[cv2.typing.Point],
typing.Sequence[float],
]: ...
-
@typing.overload
def detect(
self,
@@ -3040,7 +2886,6 @@ class HOGDescriptor:
typing.Sequence[cv2.typing.Point],
typing.Sequence[float],
]: ...
-
@typing.overload
def detectMultiScale(
self,
@@ -3055,7 +2900,6 @@ class HOGDescriptor:
typing.Sequence[cv2.typing.Rect],
typing.Sequence[float],
]: ...
-
@typing.overload
def detectMultiScale(
self,
@@ -3070,7 +2914,6 @@ class HOGDescriptor:
typing.Sequence[cv2.typing.Rect],
typing.Sequence[float],
]: ...
-
@typing.overload
def computeGradient(
self,
@@ -3083,7 +2926,6 @@ class HOGDescriptor:
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def computeGradient(
self,
@@ -3096,14 +2938,11 @@ class HOGDescriptor:
UMat,
UMat,
]: ...
-
@staticmethod
def getDefaultPeopleDetector() -> typing.Sequence[float]: ...
-
@staticmethod
def getDaimlerPeopleDetector() -> typing.Sequence[float]: ...
-
class QRCodeEncoder:
# Classes
class Params:
@@ -3119,17 +2958,16 @@ class QRCodeEncoder:
@classmethod
def create(cls, parameters: QRCodeEncoder.Params = ...) -> QRCodeEncoder: ...
-
@typing.overload
def encode(self, encoded_info: str, qrcode: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def encode(self, encoded_info: str, qrcode: UMat | None = ...) -> UMat: ...
-
@typing.overload
def encodeStructuredAppend(
- self, encoded_info: str, qrcodes: typing.Sequence[cv2.typing.MatLike] | None = ...,
+ self,
+ encoded_info: str,
+ qrcodes: typing.Sequence[cv2.typing.MatLike] | None = ...,
) -> typing.Sequence[cv2.typing.MatLike]: ...
-
@typing.overload
def encodeStructuredAppend(
self,
@@ -3137,7 +2975,6 @@ class QRCodeEncoder:
qrcodes: typing.Sequence[UMat] | None = ...,
) -> typing.Sequence[UMat]: ...
-
class GraphicalCodeDetector:
# Functions
@typing.overload
@@ -3149,10 +2986,8 @@ class GraphicalCodeDetector:
bool,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detect(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ...
-
@typing.overload
def decode(
self,
@@ -3163,10 +2998,8 @@ class GraphicalCodeDetector:
str,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def decode(self, img: UMat, points: UMat, straight_code: UMat | None = ...) -> tuple[str, UMat]: ...
-
@typing.overload
def detectAndDecode(
self,
@@ -3178,7 +3011,6 @@ class GraphicalCodeDetector:
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detectAndDecode(
self,
@@ -3190,7 +3022,6 @@ class GraphicalCodeDetector:
UMat,
UMat,
]: ...
-
@typing.overload
def detectMulti(
self,
@@ -3200,10 +3031,8 @@ class GraphicalCodeDetector:
bool,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detectMulti(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ...
-
@typing.overload
def decodeMulti(
self,
@@ -3215,7 +3044,6 @@ class GraphicalCodeDetector:
typing.Sequence[str],
typing.Sequence[cv2.typing.MatLike],
]: ...
-
@typing.overload
def decodeMulti(
self,
@@ -3227,7 +3055,6 @@ class GraphicalCodeDetector:
typing.Sequence[str],
typing.Sequence[UMat],
]: ...
-
@typing.overload
def detectAndDecodeMulti(
self,
@@ -3240,7 +3067,6 @@ class GraphicalCodeDetector:
cv2.typing.MatLike,
typing.Sequence[cv2.typing.MatLike],
]: ...
-
@typing.overload
def detectAndDecodeMulti(
self,
@@ -3254,25 +3080,16 @@ class GraphicalCodeDetector:
typing.Sequence[UMat],
]: ...
-
class FaceDetectorYN:
# Functions
def setInputSize(self, input_size: cv2.typing.Size) -> None: ...
-
def getInputSize(self) -> cv2.typing.Size: ...
-
def setScoreThreshold(self, score_threshold: float) -> None: ...
-
def getScoreThreshold(self) -> float: ...
-
def setNMSThreshold(self, nms_threshold: float) -> None: ...
-
def getNMSThreshold(self) -> float: ...
-
def setTopK(self, top_k: int) -> None: ...
-
def getTopK(self) -> int: ...
-
@typing.overload
def detect(
self,
@@ -3282,10 +3099,8 @@ class FaceDetectorYN:
int,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detect(self, image: UMat, faces: UMat | None = ...) -> tuple[int, UMat]: ...
-
@classmethod
def create(
cls,
@@ -3299,27 +3114,25 @@ class FaceDetectorYN:
target_id: int = ...,
) -> FaceDetectorYN: ...
-
class FaceRecognizerSF:
# Functions
@typing.overload
def alignCrop(
- self, src_img: cv2.typing.MatLike, face_box: cv2.typing.MatLike,
+ self,
+ src_img: cv2.typing.MatLike,
+ face_box: cv2.typing.MatLike,
aligned_img: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def alignCrop(self, src_img: UMat, face_box: UMat, aligned_img: UMat | None = ...) -> UMat: ...
-
@typing.overload
def feature(
- self, aligned_img: cv2.typing.MatLike,
+ self,
+ aligned_img: cv2.typing.MatLike,
face_feature: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def feature(self, aligned_img: UMat, face_feature: UMat | None = ...) -> UMat: ...
-
@typing.overload
def match(
self,
@@ -3327,62 +3140,43 @@ class FaceRecognizerSF:
face_feature2: cv2.typing.MatLike,
dis_type: int = ...,
) -> float: ...
-
@typing.overload
def match(self, face_feature1: UMat, face_feature2: UMat, dis_type: int = ...) -> float: ...
-
@classmethod
def create(cls, model: str, config: str, backend_id: int = ..., target_id: int = ...) -> FaceRecognizerSF: ...
-
class Stitcher:
# Functions
@classmethod
def create(cls, mode: Stitcher_Mode = ...) -> Stitcher: ...
-
def registrationResol(self) -> float: ...
-
def setRegistrationResol(self, resol_mpx: float) -> None: ...
-
def seamEstimationResol(self) -> float: ...
-
def setSeamEstimationResol(self, resol_mpx: float) -> None: ...
-
def compositingResol(self) -> float: ...
-
def setCompositingResol(self, resol_mpx: float) -> None: ...
-
def panoConfidenceThresh(self) -> float: ...
-
def setPanoConfidenceThresh(self, conf_thresh: float) -> None: ...
-
def waveCorrection(self) -> bool: ...
-
def setWaveCorrection(self, flag: bool) -> None: ...
-
def interpolationFlags(self) -> InterpolationFlags: ...
-
def setInterpolationFlags(self, interp_flags: InterpolationFlags) -> None: ...
-
@typing.overload
def estimateTransform(
self,
images: typing.Sequence[cv2.typing.MatLike],
masks: typing.Sequence[cv2.typing.MatLike] | None = ...,
) -> Stitcher_Status: ...
-
@typing.overload
def estimateTransform(
self,
images: typing.Sequence[UMat],
masks: typing.Sequence[UMat] | None = ...,
) -> Stitcher_Status: ...
-
@typing.overload
def composePanorama(self, pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ...
@typing.overload
def composePanorama(self, pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ...
-
@typing.overload
def composePanorama(
self,
@@ -3392,7 +3186,6 @@ class Stitcher:
Stitcher_Status,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def composePanorama(
self,
@@ -3402,7 +3195,6 @@ class Stitcher:
Stitcher_Status,
UMat,
]: ...
-
@typing.overload
def stitch(
self,
@@ -3412,10 +3204,8 @@ class Stitcher:
Stitcher_Status,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def stitch(self, images: typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ...
-
@typing.overload
def stitch(
self,
@@ -3426,7 +3216,6 @@ class Stitcher:
Stitcher_Status,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def stitch(
self,
@@ -3437,40 +3226,36 @@ class Stitcher:
Stitcher_Status,
UMat,
]: ...
-
def workScale(self) -> float: ...
-
class PyRotationWarper:
# Functions
@typing.overload
def __init__(self, type: str, scale: float) -> None: ...
@typing.overload
def __init__(self) -> None: ...
-
@typing.overload
def warpPoint(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ...
@typing.overload
def warpPoint(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...
-
@typing.overload
def warpPointBackward(
- self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike,
+ self,
+ pt: cv2.typing.Point2f,
+ K: cv2.typing.MatLike,
R: cv2.typing.MatLike,
) -> cv2.typing.Point2f: ...
-
@typing.overload
def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...
-
@typing.overload
def warpPointBackward(
- self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike,
+ self,
+ pt: cv2.typing.Point2f,
+ K: cv2.typing.MatLike,
R: cv2.typing.MatLike,
) -> cv2.typing.Point2f: ...
-
@typing.overload
def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...
-
@typing.overload
def buildMaps(
self,
@@ -3484,7 +3269,6 @@ class PyRotationWarper:
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def buildMaps(
self,
@@ -3498,7 +3282,6 @@ class PyRotationWarper:
UMat,
UMat,
]: ...
-
@typing.overload
def warp(
self,
@@ -3512,7 +3295,6 @@ class PyRotationWarper:
cv2.typing.Point,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def warp(
self,
@@ -3526,7 +3308,6 @@ class PyRotationWarper:
cv2.typing.Point,
UMat,
]: ...
-
@typing.overload
def warpBackward(
self,
@@ -3538,7 +3319,6 @@ class PyRotationWarper:
dst_size: cv2.typing.Size,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def warpBackward(
self,
@@ -3550,20 +3330,14 @@ class PyRotationWarper:
dst_size: cv2.typing.Size,
dst: UMat | None = ...,
) -> UMat: ...
-
@typing.overload
def warpRoi(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Rect: ...
@typing.overload
def warpRoi(self, src_size: cv2.typing.Size, K: UMat, R: UMat) -> cv2.typing.Rect: ...
-
def getScale(self) -> float: ...
-
def setScale(self, arg1: float) -> None: ...
-
-class WarperCreator:
- ...
-
+class WarperCreator: ...
class KalmanFilter:
statePre: cv2.typing.MatLike
@@ -3582,28 +3356,21 @@ class KalmanFilter:
def __init__(self) -> None: ...
@typing.overload
def __init__(self, dynamParams: int, measureParams: int, controlParams: int = ..., type: int = ...) -> None: ...
-
def predict(self, control: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
-
def correct(self, measurement: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
-
class Tracker:
# Functions
@typing.overload
def init(self, image: cv2.typing.MatLike, boundingBox: cv2.typing.Rect) -> None: ...
@typing.overload
def init(self, image: UMat, boundingBox: cv2.typing.Rect) -> None: ...
-
@typing.overload
def update(self, image: cv2.typing.MatLike) -> tuple[bool, cv2.typing.Rect]: ...
@typing.overload
def update(self, image: UMat) -> tuple[bool, cv2.typing.Rect]: ...
-
-class GArrayDesc:
- ...
-
+class GArrayDesc: ...
class GComputation:
# Functions
@@ -3615,44 +3382,38 @@ class GComputation:
def __init__(self, in_: GMat, out: GScalar) -> None: ...
@typing.overload
def __init__(self, in1: GMat, in2: GMat, out: GMat) -> None: ...
-
def apply(
- self, callback: cv2.typing.ExtractArgsCallback,
+ self,
+ callback: cv2.typing.ExtractArgsCallback,
args: typing.Sequence[GCompileArg] = ...,
) -> typing.Sequence[cv2.typing.GRunArg]: ...
-
@typing.overload
def compileStreaming(
self,
in_metas: typing.Sequence[cv2.typing.GMetaArg],
args: typing.Sequence[GCompileArg] = ...,
) -> GStreamingCompiled: ...
-
@typing.overload
def compileStreaming(self, args: typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ...
-
@typing.overload
def compileStreaming(
- self, callback: cv2.typing.ExtractMetaCallback,
+ self,
+ callback: cv2.typing.ExtractMetaCallback,
args: typing.Sequence[GCompileArg] = ...,
) -> GStreamingCompiled: ...
-
class GFrame:
# Functions
def __init__(self) -> None: ...
-
class GKernelPackage:
# Functions
def size(self) -> int: ...
-
class GMat:
# Functions
def __init__(self) -> None: ...
-
class GMatDesc:
@property
def depth(self) -> int: ...
@@ -3674,29 +3435,20 @@ class GMatDesc:
def __init__(self, d: int, dd: typing.Sequence[int]) -> None: ...
@typing.overload
def __init__(self) -> None: ...
-
@typing.overload
def withSizeDelta(self, delta: cv2.typing.Size) -> GMatDesc: ...
@typing.overload
def withSizeDelta(self, dx: int, dy: int) -> GMatDesc: ...
-
def withSize(self, sz: cv2.typing.Size) -> GMatDesc: ...
-
def withDepth(self, ddepth: int) -> GMatDesc: ...
-
def withType(self, ddepth: int, dchan: int) -> GMatDesc: ...
-
@typing.overload
def asPlanar(self) -> GMatDesc: ...
@typing.overload
def asPlanar(self, planes: int) -> GMatDesc: ...
-
def asInterleaved(self) -> GMatDesc: ...
-
-class GOpaqueDesc:
- ...
-
+class GOpaqueDesc: ...
class GScalar:
# Functions
@@ -3705,40 +3457,27 @@ class GScalar:
@typing.overload
def __init__(self, s: cv2.typing.Scalar) -> None: ...
-
-class GScalarDesc:
- ...
-
+class GScalarDesc: ...
class GStreamingCompiled:
# Functions
def __init__(self) -> None: ...
-
def setSource(self, callback: cv2.typing.ExtractArgsCallback) -> None: ...
-
def start(self) -> None: ...
-
def pull(self) -> tuple[bool, typing.Sequence[cv2.typing.GRunArg] | typing.Sequence[cv2.typing.GOptRunArg]]: ...
-
def stop(self) -> None: ...
-
def running(self) -> bool: ...
-
class GOpaqueT:
# Functions
def __init__(self, type: cv2.gapi.ArgType) -> None: ...
-
def type(self) -> cv2.gapi.ArgType: ...
-
class GArrayT:
# Functions
def __init__(self, type: cv2.gapi.ArgType) -> None: ...
-
def type(self) -> cv2.gapi.ArgType: ...
-
class GCompileArg:
# Functions
@typing.overload
@@ -3748,48 +3487,38 @@ class GCompileArg:
@typing.overload
def __init__(self, arg: cv2.gapi.streaming.queue_capacity) -> None: ...
-
class GInferInputs:
# Functions
def __init__(self) -> None: ...
-
@typing.overload
def setInput(self, name: str, value: GMat) -> GInferInputs: ...
@typing.overload
def setInput(self, name: str, value: GFrame) -> GInferInputs: ...
-
class GInferListInputs:
# Functions
def __init__(self) -> None: ...
-
@typing.overload
def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ...
@typing.overload
def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ...
-
class GInferOutputs:
# Functions
def __init__(self) -> None: ...
-
def at(self, name: str) -> GMat: ...
-
class GInferListOutputs:
# Functions
def __init__(self) -> None: ...
-
def at(self, name: str) -> GArrayT: ...
-
class GeneralizedHough(Algorithm):
# Functions
@typing.overload
def setTemplate(self, templ: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ...
@typing.overload
def setTemplate(self, templ: UMat, templCenter: cv2.typing.Point = ...) -> None: ...
-
@typing.overload
def setTemplate(
self,
@@ -3798,10 +3527,8 @@ class GeneralizedHough(Algorithm):
dy: cv2.typing.MatLike,
templCenter: cv2.typing.Point = ...,
) -> None: ...
-
@typing.overload
def setTemplate(self, edges: UMat, dx: UMat, dy: UMat, templCenter: cv2.typing.Point = ...) -> None: ...
-
@typing.overload
def detect(
self,
@@ -3812,10 +3539,8 @@ class GeneralizedHough(Algorithm):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detect(self, image: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ...
-
@typing.overload
def detect(
self,
@@ -3828,7 +3553,6 @@ class GeneralizedHough(Algorithm):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detect(
self,
@@ -3841,46 +3565,29 @@ class GeneralizedHough(Algorithm):
UMat,
UMat,
]: ...
-
def setCannyLowThresh(self, cannyLowThresh: int) -> None: ...
-
def getCannyLowThresh(self) -> int: ...
-
def setCannyHighThresh(self, cannyHighThresh: int) -> None: ...
-
def getCannyHighThresh(self) -> int: ...
-
def setMinDist(self, minDist: float) -> None: ...
-
def getMinDist(self) -> float: ...
-
def setDp(self, dp: float) -> None: ...
-
def getDp(self) -> float: ...
-
def setMaxBufferSize(self, maxBufferSize: int) -> None: ...
-
def getMaxBufferSize(self) -> int: ...
-
class CLAHE(Algorithm):
# Functions
@typing.overload
def apply(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def apply(self, src: UMat, dst: UMat | None = ...) -> UMat: ...
-
def setClipLimit(self, clipLimit: float) -> None: ...
-
def getClipLimit(self) -> float: ...
-
def setTilesGridSize(self, tileGridSize: cv2.typing.Size) -> None: ...
-
def getTilesGridSize(self) -> cv2.typing.Size: ...
-
def collectGarbage(self) -> None: ...
-
class LineSegmentDetector(Algorithm):
# Functions
@typing.overload
@@ -3897,7 +3604,6 @@ class LineSegmentDetector(Algorithm):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detect(
self,
@@ -3912,12 +3618,10 @@ class LineSegmentDetector(Algorithm):
UMat,
UMat,
]: ...
-
@typing.overload
def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@typing.overload
def drawSegments(self, image: UMat, lines: UMat) -> UMat: ...
-
@typing.overload
def compareSegments(
self,
@@ -3929,7 +3633,6 @@ class LineSegmentDetector(Algorithm):
int,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def compareSegments(
self,
@@ -3942,19 +3645,15 @@ class LineSegmentDetector(Algorithm):
UMat,
]: ...
-
class Tonemap(Algorithm):
# Functions
@typing.overload
def process(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def process(self, src: UMat, dst: UMat | None = ...) -> UMat: ...
-
def getGamma(self) -> float: ...
-
def setGamma(self, gamma: float) -> None: ...
-
class AlignExposures(Algorithm):
# Functions
@typing.overload
@@ -3965,7 +3664,6 @@ class AlignExposures(Algorithm):
times: cv2.typing.MatLike,
response: cv2.typing.MatLike,
) -> None: ...
-
@typing.overload
def process(
self,
@@ -3975,7 +3673,6 @@ class AlignExposures(Algorithm):
response: UMat,
) -> None: ...
-
class CalibrateCRF(Algorithm):
# Functions
@typing.overload
@@ -3985,11 +3682,9 @@ class CalibrateCRF(Algorithm):
times: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...
-
class MergeExposures(Algorithm):
# Functions
@typing.overload
@@ -4000,26 +3695,24 @@ class MergeExposures(Algorithm):
response: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
-
class AffineFeature(Feature2D):
# Functions
@classmethod
def create(
- cls, backend: Feature2D, maxTilt: int = ..., minTilt: int = ...,
- tiltStep: float = ..., rotateStepBase: float = ...,
+ cls,
+ backend: Feature2D,
+ maxTilt: int = ...,
+ minTilt: int = ...,
+ tiltStep: float = ...,
+ rotateStepBase: float = ...,
) -> AffineFeature: ...
-
def setViewParams(self, tilts: typing.Sequence[float], rolls: typing.Sequence[float]) -> None: ...
-
def getViewParams(self, tilts: typing.Sequence[float], rolls: typing.Sequence[float]) -> None: ...
-
def getDefaultName(self) -> str: ...
-
class SIFT(Feature2D):
# Functions
@classmethod
@@ -4033,43 +3726,35 @@ class SIFT(Feature2D):
sigma: float = ...,
enable_precise_upscale: bool = ...,
) -> SIFT: ...
-
@classmethod
@typing.overload
def create(
- cls, nfeatures: int, nOctaveLayers: int, contrastThreshold: float, edgeThreshold: float,
- sigma: float, descriptorType: int, enable_precise_upscale: bool = ...,
+ cls,
+ nfeatures: int,
+ nOctaveLayers: int,
+ contrastThreshold: float,
+ edgeThreshold: float,
+ sigma: float,
+ descriptorType: int,
+ enable_precise_upscale: bool = ...,
) -> SIFT: ...
-
def getDefaultName(self) -> str: ...
-
def setNFeatures(self, maxFeatures: int) -> None: ...
-
def getNFeatures(self) -> int: ...
-
def setNOctaveLayers(self, nOctaveLayers: int) -> None: ...
-
def getNOctaveLayers(self) -> int: ...
-
def setContrastThreshold(self, contrastThreshold: float) -> None: ...
-
def getContrastThreshold(self) -> float: ...
-
def setEdgeThreshold(self, edgeThreshold: float) -> None: ...
-
def getEdgeThreshold(self) -> float: ...
-
def setSigma(self, sigma: float) -> None: ...
-
def getSigma(self) -> float: ...
-
class BRISK(Feature2D):
# Functions
@classmethod
@typing.overload
def create(cls, thresh: int = ..., octaves: int = ..., patternScale: float = ...) -> BRISK: ...
-
@classmethod
@typing.overload
def create(
@@ -4080,7 +3765,6 @@ class BRISK(Feature2D):
dMin: float = ...,
indexChange: typing.Sequence[int] = ...,
) -> BRISK: ...
-
@classmethod
@typing.overload
def create(
@@ -4093,22 +3777,14 @@ class BRISK(Feature2D):
dMin: float = ...,
indexChange: typing.Sequence[int] = ...,
) -> BRISK: ...
-
def getDefaultName(self) -> str: ...
-
def setThreshold(self, threshold: int) -> None: ...
-
def getThreshold(self) -> int: ...
-
def setOctaves(self, octaves: int) -> None: ...
-
def getOctaves(self) -> int: ...
-
def setPatternScale(self, patternScale: float) -> None: ...
-
def getPatternScale(self) -> float: ...
-
class ORB(Feature2D):
# Functions
@classmethod
@@ -4124,46 +3800,26 @@ class ORB(Feature2D):
patchSize: int = ...,
fastThreshold: int = ...,
) -> ORB: ...
-
def setMaxFeatures(self, maxFeatures: int) -> None: ...
-
def getMaxFeatures(self) -> int: ...
-
def setScaleFactor(self, scaleFactor: float) -> None: ...
-
def getScaleFactor(self) -> float: ...
-
def setNLevels(self, nlevels: int) -> None: ...
-
def getNLevels(self) -> int: ...
-
def setEdgeThreshold(self, edgeThreshold: int) -> None: ...
-
def getEdgeThreshold(self) -> int: ...
-
def setFirstLevel(self, firstLevel: int) -> None: ...
-
def getFirstLevel(self) -> int: ...
-
def setWTA_K(self, wta_k: int) -> None: ...
-
def getWTA_K(self) -> int: ...
-
def setScoreType(self, scoreType: ORB_ScoreType) -> None: ...
-
def getScoreType(self) -> ORB_ScoreType: ...
-
def setPatchSize(self, patchSize: int) -> None: ...
-
def getPatchSize(self) -> int: ...
-
def setFastThreshold(self, fastThreshold: int) -> None: ...
-
def getFastThreshold(self) -> int: ...
-
def getDefaultName(self) -> str: ...
-
class MSER(Feature2D):
# Functions
@classmethod
@@ -4179,7 +3835,6 @@ class MSER(Feature2D):
min_margin: float = ...,
edge_blur_size: int = ...,
) -> MSER: ...
-
@typing.overload
def detectRegions(
self,
@@ -4188,7 +3843,6 @@ class MSER(Feature2D):
typing.Sequence[typing.Sequence[cv2.typing.Point]],
typing.Sequence[cv2.typing.Rect],
]: ...
-
@typing.overload
def detectRegions(
self,
@@ -4197,96 +3851,62 @@ class MSER(Feature2D):
typing.Sequence[typing.Sequence[cv2.typing.Point]],
typing.Sequence[cv2.typing.Rect],
]: ...
-
def setDelta(self, delta: int) -> None: ...
-
def getDelta(self) -> int: ...
-
def setMinArea(self, minArea: int) -> None: ...
-
def getMinArea(self) -> int: ...
-
def setMaxArea(self, maxArea: int) -> None: ...
-
def getMaxArea(self) -> int: ...
-
def setMaxVariation(self, maxVariation: float) -> None: ...
-
def getMaxVariation(self) -> float: ...
-
def setMinDiversity(self, minDiversity: float) -> None: ...
-
def getMinDiversity(self) -> float: ...
-
def setMaxEvolution(self, maxEvolution: int) -> None: ...
-
def getMaxEvolution(self) -> int: ...
-
def setAreaThreshold(self, areaThreshold: float) -> None: ...
-
def getAreaThreshold(self) -> float: ...
-
def setMinMargin(self, min_margin: float) -> None: ...
-
def getMinMargin(self) -> float: ...
-
def setEdgeBlurSize(self, edge_blur_size: int) -> None: ...
-
def getEdgeBlurSize(self) -> int: ...
-
def setPass2Only(self, f: bool) -> None: ...
-
def getPass2Only(self) -> bool: ...
-
def getDefaultName(self) -> str: ...
-
class FastFeatureDetector(Feature2D):
# Functions
@classmethod
def create(
- cls, threshold: int = ..., nonmaxSuppression: bool = ...,
+ cls,
+ threshold: int = ...,
+ nonmaxSuppression: bool = ...,
type: FastFeatureDetector_DetectorType = ...,
) -> FastFeatureDetector: ...
-
def setThreshold(self, threshold: int) -> None: ...
-
def getThreshold(self) -> int: ...
-
def setNonmaxSuppression(self, f: bool) -> None: ...
-
def getNonmaxSuppression(self) -> bool: ...
-
def setType(self, type: FastFeatureDetector_DetectorType) -> None: ...
-
def getType(self) -> FastFeatureDetector_DetectorType: ...
-
def getDefaultName(self) -> str: ...
-
class AgastFeatureDetector(Feature2D):
# Functions
@classmethod
def create(
- cls, threshold: int = ..., nonmaxSuppression: bool = ...,
+ cls,
+ threshold: int = ...,
+ nonmaxSuppression: bool = ...,
type: AgastFeatureDetector_DetectorType = ...,
) -> AgastFeatureDetector: ...
-
def setThreshold(self, threshold: int) -> None: ...
-
def getThreshold(self) -> int: ...
-
def setNonmaxSuppression(self, f: bool) -> None: ...
-
def getNonmaxSuppression(self) -> bool: ...
-
def setType(self, type: AgastFeatureDetector_DetectorType) -> None: ...
-
def getType(self) -> AgastFeatureDetector_DetectorType: ...
-
def getDefaultName(self) -> str: ...
-
class GFTTDetector(Feature2D):
# Functions
@classmethod
@@ -4300,7 +3920,6 @@ class GFTTDetector(Feature2D):
useHarrisDetector: bool = ...,
k: float = ...,
) -> GFTTDetector: ...
-
@classmethod
@typing.overload
def create(
@@ -4313,38 +3932,22 @@ class GFTTDetector(Feature2D):
useHarrisDetector: bool = ...,
k: float = ...,
) -> GFTTDetector: ...
-
def setMaxFeatures(self, maxFeatures: int) -> None: ...
-
def getMaxFeatures(self) -> int: ...
-
def setQualityLevel(self, qlevel: float) -> None: ...
-
def getQualityLevel(self) -> float: ...
-
def setMinDistance(self, minDistance: float) -> None: ...
-
def getMinDistance(self) -> float: ...
-
def setBlockSize(self, blockSize: int) -> None: ...
-
def getBlockSize(self) -> int: ...
-
def setGradientSize(self, gradientSize_: int) -> None: ...
-
def getGradientSize(self) -> int: ...
-
def setHarrisDetector(self, val: bool) -> None: ...
-
def getHarrisDetector(self) -> bool: ...
-
def setK(self, k: float) -> None: ...
-
def getK(self) -> float: ...
-
def getDefaultName(self) -> str: ...
-
class SimpleBlobDetector(Feature2D):
# Classes
class Params:
@@ -4376,51 +3979,37 @@ class SimpleBlobDetector(Feature2D):
@classmethod
def create(cls, parameters: SimpleBlobDetector.Params = ...) -> SimpleBlobDetector: ...
-
def setParams(self, params: SimpleBlobDetector.Params) -> None: ...
-
def getParams(self) -> SimpleBlobDetector.Params: ...
-
def getDefaultName(self) -> str: ...
-
def getBlobContours(self) -> typing.Sequence[typing.Sequence[cv2.typing.Point]]: ...
-
class KAZE(Feature2D):
# Functions
@classmethod
def create(
- cls, extended: bool = ..., upright: bool = ..., threshold: float = ..., nOctaves: int = ...,
- nOctaveLayers: int = ..., diffusivity: KAZE_DiffusivityType = ...,
+ cls,
+ extended: bool = ...,
+ upright: bool = ...,
+ threshold: float = ...,
+ nOctaves: int = ...,
+ nOctaveLayers: int = ...,
+ diffusivity: KAZE_DiffusivityType = ...,
) -> KAZE: ...
-
def setExtended(self, extended: bool) -> None: ...
-
def getExtended(self) -> bool: ...
-
def setUpright(self, upright: bool) -> None: ...
-
def getUpright(self) -> bool: ...
-
def setThreshold(self, threshold: float) -> None: ...
-
def getThreshold(self) -> float: ...
-
def setNOctaves(self, octaves: int) -> None: ...
-
def getNOctaves(self) -> int: ...
-
def setNOctaveLayers(self, octaveLayers: int) -> None: ...
-
def getNOctaveLayers(self) -> int: ...
-
def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ...
-
def getDiffusivity(self) -> KAZE_DiffusivityType: ...
-
def getDefaultName(self) -> str: ...
-
class AKAZE(Feature2D):
# Functions
@classmethod
@@ -4434,76 +4023,55 @@ class AKAZE(Feature2D):
nOctaveLayers: int = ...,
diffusivity: KAZE_DiffusivityType = ...,
) -> AKAZE: ...
-
def setDescriptorType(self, dtype: AKAZE_DescriptorType) -> None: ...
-
def getDescriptorType(self) -> AKAZE_DescriptorType: ...
-
def setDescriptorSize(self, dsize: int) -> None: ...
-
def getDescriptorSize(self) -> int: ...
-
def setDescriptorChannels(self, dch: int) -> None: ...
-
def getDescriptorChannels(self) -> int: ...
-
def setThreshold(self, threshold: float) -> None: ...
-
def getThreshold(self) -> float: ...
-
def setNOctaves(self, octaves: int) -> None: ...
-
def getNOctaves(self) -> int: ...
-
def setNOctaveLayers(self, octaveLayers: int) -> None: ...
-
def getNOctaveLayers(self) -> int: ...
-
def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ...
-
def getDiffusivity(self) -> KAZE_DiffusivityType: ...
-
def getDefaultName(self) -> str: ...
-
class DescriptorMatcher(Algorithm):
# Functions
@typing.overload
def add(self, descriptors: typing.Sequence[cv2.typing.MatLike]) -> None: ...
@typing.overload
def add(self, descriptors: typing.Sequence[UMat]) -> None: ...
-
def getTrainDescriptors(self) -> typing.Sequence[cv2.typing.MatLike]: ...
-
def clear(self) -> None: ...
-
def empty(self) -> bool: ...
-
def isMaskSupported(self) -> bool: ...
-
def train(self) -> None: ...
-
@typing.overload
def match(
- self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike,
+ self,
+ queryDescriptors: cv2.typing.MatLike,
+ trainDescriptors: cv2.typing.MatLike,
mask: cv2.typing.MatLike | None = ...,
) -> typing.Sequence[DMatch]: ...
-
@typing.overload
def match(
- self, queryDescriptors: UMat, trainDescriptors: UMat,
+ self,
+ queryDescriptors: UMat,
+ trainDescriptors: UMat,
mask: UMat | None = ...,
) -> typing.Sequence[DMatch]: ...
-
@typing.overload
def match(
- self, queryDescriptors: cv2.typing.MatLike,
+ self,
+ queryDescriptors: cv2.typing.MatLike,
masks: typing.Sequence[cv2.typing.MatLike] | None = ...,
) -> typing.Sequence[DMatch]: ...
-
@typing.overload
def match(self, queryDescriptors: UMat, masks: typing.Sequence[UMat] | None = ...) -> typing.Sequence[DMatch]: ...
-
@typing.overload
def knnMatch(
self,
@@ -4513,13 +4081,15 @@ class DescriptorMatcher(Algorithm):
mask: cv2.typing.MatLike | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def knnMatch(
- self, queryDescriptors: UMat, trainDescriptors: UMat, k: int, mask: UMat | None = ...,
+ self,
+ queryDescriptors: UMat,
+ trainDescriptors: UMat,
+ k: int,
+ mask: UMat | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def knnMatch(
self,
@@ -4528,7 +4098,6 @@ class DescriptorMatcher(Algorithm):
masks: typing.Sequence[cv2.typing.MatLike] | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def knnMatch(
self,
@@ -4537,7 +4106,6 @@ class DescriptorMatcher(Algorithm):
masks: typing.Sequence[UMat] | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def radiusMatch(
self,
@@ -4547,13 +4115,15 @@ class DescriptorMatcher(Algorithm):
mask: cv2.typing.MatLike | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def radiusMatch(
- self, queryDescriptors: UMat, trainDescriptors: UMat, maxDistance: float, mask: UMat |
- None = ..., compactResult: bool = ...,
+ self,
+ queryDescriptors: UMat,
+ trainDescriptors: UMat,
+ maxDistance: float,
+ mask: UMat | None = ...,
+ compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def radiusMatch(
self,
@@ -4562,7 +4132,6 @@ class DescriptorMatcher(Algorithm):
masks: typing.Sequence[cv2.typing.MatLike] | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def radiusMatch(
self,
@@ -4571,19 +4140,15 @@ class DescriptorMatcher(Algorithm):
masks: typing.Sequence[UMat] | None = ...,
compactResult: bool = ...,
) -> typing.Sequence[typing.Sequence[DMatch]]: ...
-
@typing.overload
def write(self, fileName: str) -> None: ...
@typing.overload
def write(self, fs: FileStorage, name: str) -> None: ...
-
@typing.overload
def read(self, fileName: str) -> None: ...
@typing.overload
def read(self, arg1: FileNode) -> None: ...
-
def clone(self, emptyTrainData: bool = ...) -> DescriptorMatcher: ...
-
@classmethod
@typing.overload
def create(cls, descriptorMatcherType: str) -> DescriptorMatcher: ...
@@ -4591,7 +4156,6 @@ class DescriptorMatcher(Algorithm):
@typing.overload
def create(cls, matcherType: DescriptorMatcher_MatcherType) -> DescriptorMatcher: ...
-
class BOWKMeansTrainer(BOWTrainer):
# Functions
def __init__(
@@ -4601,63 +4165,43 @@ class BOWKMeansTrainer(BOWTrainer):
attempts: int = ...,
flags: int = ...,
) -> None: ...
-
@typing.overload
def cluster(self) -> cv2.typing.MatLike: ...
@typing.overload
def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
-
class StereoMatcher(Algorithm):
# Functions
@typing.overload
def compute(
- self, left: cv2.typing.MatLike, right: cv2.typing.MatLike,
+ self,
+ left: cv2.typing.MatLike,
+ right: cv2.typing.MatLike,
disparity: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def compute(self, left: UMat, right: UMat, disparity: UMat | None = ...) -> UMat: ...
-
def getMinDisparity(self) -> int: ...
-
def setMinDisparity(self, minDisparity: int) -> None: ...
-
def getNumDisparities(self) -> int: ...
-
def setNumDisparities(self, numDisparities: int) -> None: ...
-
def getBlockSize(self) -> int: ...
-
def setBlockSize(self, blockSize: int) -> None: ...
-
def getSpeckleWindowSize(self) -> int: ...
-
def setSpeckleWindowSize(self, speckleWindowSize: int) -> None: ...
-
def getSpeckleRange(self) -> int: ...
-
def setSpeckleRange(self, speckleRange: int) -> None: ...
-
def getDisp12MaxDiff(self) -> int: ...
-
def setDisp12MaxDiff(self, disp12MaxDiff: int) -> None: ...
-
-class BaseCascadeClassifier(Algorithm):
- ...
-
+class BaseCascadeClassifier(Algorithm): ...
class QRCodeDetector(GraphicalCodeDetector):
# Functions
def __init__(self) -> None: ...
-
def setEpsX(self, epsX: float) -> QRCodeDetector: ...
-
def setEpsY(self, epsY: float) -> QRCodeDetector: ...
-
def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> QRCodeDetector: ...
-
@typing.overload
def decodeCurved(
self,
@@ -4668,10 +4212,8 @@ class QRCodeDetector(GraphicalCodeDetector):
str,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ...
-
@typing.overload
def detectAndDecodeCurved(
self,
@@ -4683,7 +4225,6 @@ class QRCodeDetector(GraphicalCodeDetector):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def detectAndDecodeCurved(
self,
@@ -4696,7 +4237,6 @@ class QRCodeDetector(GraphicalCodeDetector):
UMat,
]: ...
-
class QRCodeDetectorAruco(GraphicalCodeDetector):
# Classes
class Params:
@@ -4717,43 +4257,35 @@ class QRCodeDetectorAruco(GraphicalCodeDetector):
def __init__(self) -> None: ...
@typing.overload
def __init__(self, params: QRCodeDetectorAruco.Params) -> None: ...
-
def getDetectorParameters(self) -> QRCodeDetectorAruco.Params: ...
-
def setDetectorParameters(self, params: QRCodeDetectorAruco.Params) -> QRCodeDetectorAruco: ...
-
def getArucoParameters(self) -> cv2.aruco.DetectorParameters: ...
-
def setArucoParameters(self, params: cv2.aruco.DetectorParameters) -> None: ...
-
class BackgroundSubtractor(Algorithm):
# Functions
@typing.overload
def apply(
- self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike |
- None = ..., learningRate: float = ...,
+ self,
+ image: cv2.typing.MatLike,
+ fgmask: cv2.typing.MatLike | None = ...,
+ learningRate: float = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ...
-
@typing.overload
def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def getBackgroundImage(self, backgroundImage: UMat | None = ...) -> UMat: ...
-
class DenseOpticalFlow(Algorithm):
# Functions
@typing.overload
def calc(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@typing.overload
def calc(self, I0: UMat, I1: UMat, flow: UMat) -> UMat: ...
-
def collectGarbage(self) -> None: ...
-
class SparseOpticalFlow(Algorithm):
# Functions
@typing.overload
@@ -4770,7 +4302,6 @@ class SparseOpticalFlow(Algorithm):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def calc(
self,
@@ -4786,7 +4317,6 @@ class SparseOpticalFlow(Algorithm):
UMat,
]: ...
-
class TrackerMIL(Tracker):
# Classes
class Params:
@@ -4806,7 +4336,6 @@ class TrackerMIL(Tracker):
@classmethod
def create(cls, parameters: TrackerMIL.Params = ...) -> TrackerMIL: ...
-
class TrackerGOTURN(Tracker):
# Classes
class Params:
@@ -4821,7 +4350,6 @@ class TrackerGOTURN(Tracker):
@classmethod
def create(cls, parameters: TrackerGOTURN.Params = ...) -> TrackerGOTURN: ...
-
class TrackerDaSiamRPN(Tracker):
# Classes
class Params:
@@ -4838,10 +4366,8 @@ class TrackerDaSiamRPN(Tracker):
@classmethod
def create(cls, parameters: TrackerDaSiamRPN.Params = ...) -> TrackerDaSiamRPN: ...
-
def getTrackingScore(self) -> float: ...
-
class TrackerNano(Tracker):
# Classes
class Params:
@@ -4857,10 +4383,8 @@ class TrackerNano(Tracker):
@classmethod
def create(cls, parameters: TrackerNano.Params = ...) -> TrackerNano: ...
-
def getTrackingScore(self) -> float: ...
-
class error(Exception):
code: int
err: str
@@ -4869,106 +4393,63 @@ class error(Exception):
line: int
msg: str
-
class GeneralizedHoughBallard(GeneralizedHough):
# Functions
def setLevels(self, levels: int) -> None: ...
-
def getLevels(self) -> int: ...
-
def setVotesThreshold(self, votesThreshold: int) -> None: ...
-
def getVotesThreshold(self) -> int: ...
-
class GeneralizedHoughGuil(GeneralizedHough):
# Functions
def setXi(self, xi: float) -> None: ...
-
def getXi(self) -> float: ...
-
def setLevels(self, levels: int) -> None: ...
-
def getLevels(self) -> int: ...
-
def setAngleEpsilon(self, angleEpsilon: float) -> None: ...
-
def getAngleEpsilon(self) -> float: ...
-
def setMinAngle(self, minAngle: float) -> None: ...
-
def getMinAngle(self) -> float: ...
-
def setMaxAngle(self, maxAngle: float) -> None: ...
-
def getMaxAngle(self) -> float: ...
-
def setAngleStep(self, angleStep: float) -> None: ...
-
def getAngleStep(self) -> float: ...
-
def setAngleThresh(self, angleThresh: int) -> None: ...
-
def getAngleThresh(self) -> int: ...
-
def setMinScale(self, minScale: float) -> None: ...
-
def getMinScale(self) -> float: ...
-
def setMaxScale(self, maxScale: float) -> None: ...
-
def getMaxScale(self) -> float: ...
-
def setScaleStep(self, scaleStep: float) -> None: ...
-
def getScaleStep(self) -> float: ...
-
def setScaleThresh(self, scaleThresh: int) -> None: ...
-
def getScaleThresh(self) -> int: ...
-
def setPosThresh(self, posThresh: int) -> None: ...
-
def getPosThresh(self) -> int: ...
-
class TonemapDrago(Tonemap):
# Functions
def getSaturation(self) -> float: ...
-
def setSaturation(self, saturation: float) -> None: ...
-
def getBias(self) -> float: ...
-
def setBias(self, bias: float) -> None: ...
-
class TonemapReinhard(Tonemap):
# Functions
def getIntensity(self) -> float: ...
-
def setIntensity(self, intensity: float) -> None: ...
-
def getLightAdaptation(self) -> float: ...
-
def setLightAdaptation(self, light_adapt: float) -> None: ...
-
def getColorAdaptation(self) -> float: ...
-
def setColorAdaptation(self, color_adapt: float) -> None: ...
-
class TonemapMantiuk(Tonemap):
# Functions
def getScale(self) -> float: ...
-
def setScale(self, scale: float) -> None: ...
-
def getSaturation(self) -> float: ...
-
def setSaturation(self, saturation: float) -> None: ...
-
class AlignMTB(AlignExposures):
# Functions
@typing.overload
@@ -4979,7 +4460,6 @@ class AlignMTB(AlignExposures):
times: cv2.typing.MatLike,
response: cv2.typing.MatLike,
) -> None: ...
-
@typing.overload
def process(
self,
@@ -4988,26 +4468,23 @@ class AlignMTB(AlignExposures):
times: UMat,
response: UMat,
) -> None: ...
-
@typing.overload
def process(self, src: typing.Sequence[cv2.typing.MatLike], dst: typing.Sequence[cv2.typing.MatLike]) -> None: ...
@typing.overload
def process(self, src: typing.Sequence[UMat], dst: typing.Sequence[cv2.typing.MatLike]) -> None: ...
-
@typing.overload
def calculateShift(self, img0: cv2.typing.MatLike, img1: cv2.typing.MatLike) -> cv2.typing.Point: ...
@typing.overload
def calculateShift(self, img0: UMat, img1: UMat) -> cv2.typing.Point: ...
-
@typing.overload
def shiftMat(
- self, src: cv2.typing.MatLike, shift: cv2.typing.Point,
+ self,
+ src: cv2.typing.MatLike,
+ shift: cv2.typing.Point,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def shiftMat(self, src: UMat, shift: cv2.typing.Point, dst: UMat | None = ...) -> UMat: ...
-
@typing.overload
def computeBitmaps(
self,
@@ -5018,51 +4495,32 @@ class AlignMTB(AlignExposures):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def computeBitmaps(self, img: UMat, tb: UMat | None = ..., eb: UMat | None = ...) -> tuple[UMat, UMat]: ...
-
def getMaxBits(self) -> int: ...
-
def setMaxBits(self, max_bits: int) -> None: ...
-
def getExcludeRange(self) -> int: ...
-
def setExcludeRange(self, exclude_range: int) -> None: ...
-
def getCut(self) -> bool: ...
-
def setCut(self, value: bool) -> None: ...
-
class CalibrateDebevec(CalibrateCRF):
# Functions
def getLambda(self) -> float: ...
-
def setLambda(self, lambda_: float) -> None: ...
-
def getSamples(self) -> int: ...
-
def setSamples(self, samples: int) -> None: ...
-
def getRandom(self) -> bool: ...
-
def setRandom(self, random: bool) -> None: ...
-
class CalibrateRobertson(CalibrateCRF):
# Functions
def getMaxIter(self) -> int: ...
-
def setMaxIter(self, max_iter: int) -> None: ...
-
def getThreshold(self) -> float: ...
-
def setThreshold(self, threshold: float) -> None: ...
-
def getRadiance(self) -> cv2.typing.MatLike: ...
-
class MergeDebevec(MergeExposures):
# Functions
@typing.overload
@@ -5073,10 +4531,8 @@ class MergeDebevec(MergeExposures):
response: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
-
@typing.overload
def process(
self,
@@ -5084,11 +4540,9 @@ class MergeDebevec(MergeExposures):
times: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...
-
class MergeMertens(MergeExposures):
# Functions
@typing.overload
@@ -5099,32 +4553,23 @@ class MergeMertens(MergeExposures):
response: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
-
@typing.overload
def process(
- self, src: typing.Sequence[cv2.typing.MatLike],
+ self,
+ src: typing.Sequence[cv2.typing.MatLike],
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
-
def getContrastWeight(self) -> float: ...
-
def setContrastWeight(self, contrast_weiht: float) -> None: ...
-
def getSaturationWeight(self) -> float: ...
-
def setSaturationWeight(self, saturation_weight: float) -> None: ...
-
def getExposureWeight(self) -> float: ...
-
def setExposureWeight(self, exposure_weight: float) -> None: ...
-
class MergeRobertson(MergeExposures):
# Functions
@typing.overload
@@ -5135,10 +4580,8 @@ class MergeRobertson(MergeExposures):
response: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
-
@typing.overload
def process(
self,
@@ -5146,90 +4589,58 @@ class MergeRobertson(MergeExposures):
times: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...
-
class BFMatcher(DescriptorMatcher):
# Functions
def __init__(self, normType: int = ..., crossCheck: bool = ...) -> None: ...
-
@classmethod
def create(cls, normType: int = ..., crossCheck: bool = ...) -> BFMatcher: ...
-
class FlannBasedMatcher(DescriptorMatcher):
# Functions
def __init__(
- self, indexParams: cv2.typing.IndexParams = ...,
+ self,
+ indexParams: cv2.typing.IndexParams = ...,
searchParams: cv2.typing.SearchParams = ...,
) -> None: ...
-
@classmethod
def create(cls) -> FlannBasedMatcher: ...
-
class StereoBM(StereoMatcher):
# Functions
def getPreFilterType(self) -> int: ...
-
def setPreFilterType(self, preFilterType: int) -> None: ...
-
def getPreFilterSize(self) -> int: ...
-
def setPreFilterSize(self, preFilterSize: int) -> None: ...
-
def getPreFilterCap(self) -> int: ...
-
def setPreFilterCap(self, preFilterCap: int) -> None: ...
-
def getTextureThreshold(self) -> int: ...
-
def setTextureThreshold(self, textureThreshold: int) -> None: ...
-
def getUniquenessRatio(self) -> int: ...
-
def setUniquenessRatio(self, uniquenessRatio: int) -> None: ...
-
def getSmallerBlockSize(self) -> int: ...
-
def setSmallerBlockSize(self, blockSize: int) -> None: ...
-
def getROI1(self) -> cv2.typing.Rect: ...
-
def setROI1(self, roi1: cv2.typing.Rect) -> None: ...
-
def getROI2(self) -> cv2.typing.Rect: ...
-
def setROI2(self, roi2: cv2.typing.Rect) -> None: ...
-
@classmethod
def create(cls, numDisparities: int = ..., blockSize: int = ...) -> StereoBM: ...
-
class StereoSGBM(StereoMatcher):
# Functions
def getPreFilterCap(self) -> int: ...
-
def setPreFilterCap(self, preFilterCap: int) -> None: ...
-
def getUniquenessRatio(self) -> int: ...
-
def setUniquenessRatio(self, uniquenessRatio: int) -> None: ...
-
def getP1(self) -> int: ...
-
def setP1(self, P1: int) -> None: ...
-
def getP2(self) -> int: ...
-
def setP2(self, P2: int) -> None: ...
-
def getMode(self) -> int: ...
-
def setMode(self, mode: int) -> None: ...
-
@classmethod
def create(
cls,
@@ -5246,132 +4657,77 @@ class StereoSGBM(StereoMatcher):
mode: int = ...,
) -> StereoSGBM: ...
-
class BackgroundSubtractorMOG2(BackgroundSubtractor):
# Functions
def getHistory(self) -> int: ...
-
def setHistory(self, history: int) -> None: ...
-
def getNMixtures(self) -> int: ...
-
def setNMixtures(self, nmixtures: int) -> None: ...
-
def getBackgroundRatio(self) -> float: ...
-
def setBackgroundRatio(self, ratio: float) -> None: ...
-
def getVarThreshold(self) -> float: ...
-
def setVarThreshold(self, varThreshold: float) -> None: ...
-
def getVarThresholdGen(self) -> float: ...
-
def setVarThresholdGen(self, varThresholdGen: float) -> None: ...
-
def getVarInit(self) -> float: ...
-
def setVarInit(self, varInit: float) -> None: ...
-
def getVarMin(self) -> float: ...
-
def setVarMin(self, varMin: float) -> None: ...
-
def getVarMax(self) -> float: ...
-
def setVarMax(self, varMax: float) -> None: ...
-
def getComplexityReductionThreshold(self) -> float: ...
-
def setComplexityReductionThreshold(self, ct: float) -> None: ...
-
def getDetectShadows(self) -> bool: ...
-
def setDetectShadows(self, detectShadows: bool) -> None: ...
-
def getShadowValue(self) -> int: ...
-
def setShadowValue(self, value: int) -> None: ...
-
def getShadowThreshold(self) -> float: ...
-
def setShadowThreshold(self, threshold: float) -> None: ...
-
@typing.overload
def apply(
- self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike |
- None = ..., learningRate: float = ...,
+ self,
+ image: cv2.typing.MatLike,
+ fgmask: cv2.typing.MatLike | None = ...,
+ learningRate: float = ...,
) -> cv2.typing.MatLike: ...
-
@typing.overload
def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ...
-
class BackgroundSubtractorKNN(BackgroundSubtractor):
# Functions
def getHistory(self) -> int: ...
-
def setHistory(self, history: int) -> None: ...
-
def getNSamples(self) -> int: ...
-
def setNSamples(self, _nN: int) -> None: ...
-
def getDist2Threshold(self) -> float: ...
-
def setDist2Threshold(self, _dist2Threshold: float) -> None: ...
-
def getkNNSamples(self) -> int: ...
-
def setkNNSamples(self, _nkNN: int) -> None: ...
-
def getDetectShadows(self) -> bool: ...
-
def setDetectShadows(self, detectShadows: bool) -> None: ...
-
def getShadowValue(self) -> int: ...
-
def setShadowValue(self, value: int) -> None: ...
-
def getShadowThreshold(self) -> float: ...
-
def setShadowThreshold(self, threshold: float) -> None: ...
-
class FarnebackOpticalFlow(DenseOpticalFlow):
# Functions
def getNumLevels(self) -> int: ...
-
def setNumLevels(self, numLevels: int) -> None: ...
-
def getPyrScale(self) -> float: ...
-
def setPyrScale(self, pyrScale: float) -> None: ...
-
def getFastPyramids(self) -> bool: ...
-
def setFastPyramids(self, fastPyramids: bool) -> None: ...
-
def getWinSize(self) -> int: ...
-
def setWinSize(self, winSize: int) -> None: ...
-
def getNumIters(self) -> int: ...
-
def setNumIters(self, numIters: int) -> None: ...
-
def getPolyN(self) -> int: ...
-
def setPolyN(self, polyN: int) -> None: ...
-
def getPolySigma(self) -> float: ...
-
def setPolySigma(self, polySigma: float) -> None: ...
-
def getFlags(self) -> int: ...
-
def setFlags(self, flags: int) -> None: ...
-
@classmethod
def create(
cls,
@@ -5385,7 +4741,6 @@ class FarnebackOpticalFlow(DenseOpticalFlow):
flags: int = ...,
) -> FarnebackOpticalFlow: ...
-
class VariationalRefinement(DenseOpticalFlow):
# Functions
@typing.overload
@@ -5399,113 +4754,70 @@ class VariationalRefinement(DenseOpticalFlow):
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
@typing.overload
def calcUV(self, I0: UMat, I1: UMat, flow_u: UMat, flow_v: UMat) -> tuple[UMat, UMat]: ...
-
def getFixedPointIterations(self) -> int: ...
-
def setFixedPointIterations(self, val: int) -> None: ...
-
def getSorIterations(self) -> int: ...
-
def setSorIterations(self, val: int) -> None: ...
-
def getOmega(self) -> float: ...
-
def setOmega(self, val: float) -> None: ...
-
def getAlpha(self) -> float: ...
-
def setAlpha(self, val: float) -> None: ...
-
def getDelta(self) -> float: ...
-
def setDelta(self, val: float) -> None: ...
-
def getGamma(self) -> float: ...
-
def setGamma(self, val: float) -> None: ...
-
@classmethod
def create(cls) -> VariationalRefinement: ...
-
class DISOpticalFlow(DenseOpticalFlow):
# Functions
def getFinestScale(self) -> int: ...
-
def setFinestScale(self, val: int) -> None: ...
-
def getPatchSize(self) -> int: ...
-
def setPatchSize(self, val: int) -> None: ...
-
def getPatchStride(self) -> int: ...
-
def setPatchStride(self, val: int) -> None: ...
-
def getGradientDescentIterations(self) -> int: ...
-
def setGradientDescentIterations(self, val: int) -> None: ...
-
def getVariationalRefinementIterations(self) -> int: ...
-
def setVariationalRefinementIterations(self, val: int) -> None: ...
-
def getVariationalRefinementAlpha(self) -> float: ...
-
def setVariationalRefinementAlpha(self, val: float) -> None: ...
-
def getVariationalRefinementDelta(self) -> float: ...
-
def setVariationalRefinementDelta(self, val: float) -> None: ...
-
def getVariationalRefinementGamma(self) -> float: ...
-
def setVariationalRefinementGamma(self, val: float) -> None: ...
-
def getUseMeanNormalization(self) -> bool: ...
-
def setUseMeanNormalization(self, val: bool) -> None: ...
-
def getUseSpatialPropagation(self) -> bool: ...
-
def setUseSpatialPropagation(self, val: bool) -> None: ...
-
@classmethod
def create(cls, preset: int = ...) -> DISOpticalFlow: ...
-
class SparsePyrLKOpticalFlow(SparseOpticalFlow):
# Functions
def getWinSize(self) -> cv2.typing.Size: ...
-
def setWinSize(self, winSize: cv2.typing.Size) -> None: ...
-
def getMaxLevel(self) -> int: ...
-
def setMaxLevel(self, maxLevel: int) -> None: ...
-
def getTermCriteria(self) -> cv2.typing.TermCriteria: ...
-
def setTermCriteria(self, crit: cv2.typing.TermCriteria) -> None: ...
-
def getFlags(self) -> int: ...
-
def setFlags(self, flags: int) -> None: ...
-
def getMinEigThreshold(self) -> float: ...
-
def setMinEigThreshold(self, minEigThreshold: float) -> None: ...
-
@classmethod
def create(
- cls, winSize: cv2.typing.Size = ..., maxLevel: int = ..., crit: cv2.typing.TermCriteria = ...,
- flags: int = ..., minEigThreshold: float = ...,
+ cls,
+ winSize: cv2.typing.Size = ...,
+ maxLevel: int = ...,
+ crit: cv2.typing.TermCriteria = ...,
+ flags: int = ...,
+ minEigThreshold: float = ...,
) -> SparsePyrLKOpticalFlow: ...
-
# Functions
@typing.overload
def CamShift(
@@ -5516,8 +4828,6 @@ def CamShift(
cv2.typing.RotatedRect,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def CamShift(
probImage: UMat,
@@ -5527,15 +4837,15 @@ def CamShift(
cv2.typing.RotatedRect,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def Canny(
- image: cv2.typing.MatLike, threshold1: float, threshold2: float, edges: cv2.typing.MatLike |
- None = ..., apertureSize: int = ..., L2gradient: bool = ...,
+ image: cv2.typing.MatLike,
+ threshold1: float,
+ threshold2: float,
+ edges: cv2.typing.MatLike | None = ...,
+ apertureSize: int = ...,
+ L2gradient: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def Canny(
image: UMat,
@@ -5545,22 +4855,24 @@ def Canny(
apertureSize: int = ...,
L2gradient: bool = ...,
) -> UMat: ...
-
-
@typing.overload
def Canny(
- dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, threshold1: float, threshold2: float,
- edges: cv2.typing.MatLike | None = ..., L2gradient: bool = ...,
+ dx: cv2.typing.MatLike,
+ dy: cv2.typing.MatLike,
+ threshold1: float,
+ threshold2: float,
+ edges: cv2.typing.MatLike | None = ...,
+ L2gradient: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def Canny(
- dx: UMat, dy: UMat, threshold1: float, threshold2: float,
- edges: UMat | None = ..., L2gradient: bool = ...,
+ dx: UMat,
+ dy: UMat,
+ threshold1: float,
+ threshold2: float,
+ edges: UMat | None = ...,
+ L2gradient: bool = ...,
) -> UMat: ...
-
-
@typing.overload
def EMD(
signature1: cv2.typing.MatLike,
@@ -5574,8 +4886,6 @@ def EMD(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def EMD(
signature1: UMat,
@@ -5589,8 +4899,6 @@ def EMD(
float,
UMat,
]: ...
-
-
@typing.overload
def GaussianBlur(
src: cv2.typing.MatLike,
@@ -5600,8 +4908,6 @@ def GaussianBlur(
sigmaY: float = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def GaussianBlur(
src: UMat,
@@ -5611,8 +4917,6 @@ def GaussianBlur(
sigmaY: float = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def HoughCircles(
image: cv2.typing.MatLike,
@@ -5625,8 +4929,6 @@ def HoughCircles(
minRadius: int = ...,
maxRadius: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def HoughCircles(
image: UMat,
@@ -5639,8 +4941,6 @@ def HoughCircles(
minRadius: int = ...,
maxRadius: int = ...,
) -> UMat: ...
-
-
@typing.overload
def HoughLines(
image: cv2.typing.MatLike,
@@ -5653,8 +4953,6 @@ def HoughLines(
min_theta: float = ...,
max_theta: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def HoughLines(
image: UMat,
@@ -5667,8 +4965,6 @@ def HoughLines(
min_theta: float = ...,
max_theta: float = ...,
) -> UMat: ...
-
-
@typing.overload
def HoughLinesP(
image: cv2.typing.MatLike,
@@ -5679,8 +4975,6 @@ def HoughLinesP(
minLineLength: float = ...,
maxLineGap: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def HoughLinesP(
image: UMat,
@@ -5691,8 +4985,6 @@ def HoughLinesP(
minLineLength: float = ...,
maxLineGap: float = ...,
) -> UMat: ...
-
-
@typing.overload
def HoughLinesPointSet(
point: cv2.typing.MatLike,
@@ -5706,8 +4998,6 @@ def HoughLinesPointSet(
theta_step: float,
lines: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def HoughLinesPointSet(
point: UMat,
@@ -5721,8 +5011,6 @@ def HoughLinesPointSet(
theta_step: float,
lines: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def HoughLinesWithAccumulator(
image: cv2.typing.MatLike,
@@ -5735,8 +5023,6 @@ def HoughLinesWithAccumulator(
min_theta: float = ...,
max_theta: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def HoughLinesWithAccumulator(
image: UMat,
@@ -5749,32 +5035,28 @@ def HoughLinesWithAccumulator(
min_theta: float = ...,
max_theta: float = ...,
) -> UMat: ...
-
-
@typing.overload
def HuMoments(m: cv2.typing.Moments, hu: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def HuMoments(m: cv2.typing.Moments, hu: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def LUT(
- src: cv2.typing.MatLike, lut: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ lut: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def LUT(src: UMat, lut: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def Laplacian(
- src: cv2.typing.MatLike, ddepth: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ...,
- scale: float = ..., delta: float = ..., borderType: int = ...,
+ src: cv2.typing.MatLike,
+ ddepth: int,
+ dst: cv2.typing.MatLike | None = ...,
+ ksize: int = ...,
+ scale: float = ...,
+ delta: float = ...,
+ borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def Laplacian(
src: UMat,
@@ -5785,14 +5067,10 @@ def Laplacian(
delta: float = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def Mahalanobis(v1: cv2.typing.MatLike, v2: cv2.typing.MatLike, icovar: cv2.typing.MatLike) -> float: ...
@typing.overload
def Mahalanobis(v1: UMat, v2: UMat, icovar: UMat) -> float: ...
-
-
@typing.overload
def PCABackProject(
data: cv2.typing.MatLike,
@@ -5800,12 +5078,8 @@ def PCABackProject(
eigenvectors: cv2.typing.MatLike,
result: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def PCABackProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def PCACompute(
data: cv2.typing.MatLike,
@@ -5816,8 +5090,6 @@ def PCACompute(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def PCACompute(
data: UMat,
@@ -5828,8 +5100,6 @@ def PCACompute(
UMat,
UMat,
]: ...
-
-
@typing.overload
def PCACompute(
data: cv2.typing.MatLike,
@@ -5840,8 +5110,6 @@ def PCACompute(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def PCACompute(
data: UMat,
@@ -5852,8 +5120,6 @@ def PCACompute(
UMat,
UMat,
]: ...
-
-
@typing.overload
def PCACompute2(
data: cv2.typing.MatLike,
@@ -5866,8 +5132,6 @@ def PCACompute2(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def PCACompute2(
data: UMat,
@@ -5880,8 +5144,6 @@ def PCACompute2(
UMat,
UMat,
]: ...
-
-
@typing.overload
def PCACompute2(
data: cv2.typing.MatLike,
@@ -5894,8 +5156,6 @@ def PCACompute2(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def PCACompute2(
data: UMat,
@@ -5908,25 +5168,19 @@ def PCACompute2(
UMat,
UMat,
]: ...
-
-
@typing.overload
def PCAProject(
- data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike,
+ data: cv2.typing.MatLike,
+ mean: cv2.typing.MatLike,
+ eigenvectors: cv2.typing.MatLike,
result: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def PCAProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def PSNR(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, R: float = ...) -> float: ...
@typing.overload
def PSNR(src1: UMat, src2: UMat, R: float = ...) -> float: ...
-
-
@typing.overload
def RQDecomp3x3(
src: cv2.typing.MatLike,
@@ -5943,8 +5197,6 @@ def RQDecomp3x3(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def RQDecomp3x3(
src: UMat,
@@ -5961,8 +5213,6 @@ def RQDecomp3x3(
UMat,
UMat,
]: ...
-
-
@typing.overload
def Rodrigues(
src: cv2.typing.MatLike,
@@ -5972,12 +5222,8 @@ def Rodrigues(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def Rodrigues(src: UMat, dst: UMat | None = ..., jacobian: UMat | None = ...) -> tuple[UMat, UMat]: ...
-
-
@typing.overload
def SVBackSubst(
w: cv2.typing.MatLike,
@@ -5986,12 +5232,8 @@ def SVBackSubst(
rhs: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def SVBackSubst(w: UMat, u: UMat, vt: UMat, rhs: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def SVDecomp(
src: cv2.typing.MatLike,
@@ -6004,8 +5246,6 @@ def SVDecomp(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def SVDecomp(
src: UMat,
@@ -6018,15 +5258,17 @@ def SVDecomp(
UMat,
UMat,
]: ...
-
-
@typing.overload
def Scharr(
- src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ...,
- scale: float = ..., delta: float = ..., borderType: int = ...,
+ src: cv2.typing.MatLike,
+ ddepth: int,
+ dx: int,
+ dy: int,
+ dst: cv2.typing.MatLike | None = ...,
+ scale: float = ...,
+ delta: float = ...,
+ borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def Scharr(
src: UMat,
@@ -6038,15 +5280,18 @@ def Scharr(
delta: float = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def Sobel(
- src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ...,
- ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...,
+ src: cv2.typing.MatLike,
+ ddepth: int,
+ dx: int,
+ dy: int,
+ dst: cv2.typing.MatLike | None = ...,
+ ksize: int = ...,
+ scale: float = ...,
+ delta: float = ...,
+ borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def Sobel(
src: UMat,
@@ -6059,63 +5304,48 @@ def Sobel(
delta: float = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def absdiff(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def absdiff(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def accumulate(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike,
mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def accumulate(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def accumulateProduct(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike,
mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def accumulateProduct(src1: UMat, src2: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def accumulateSquare(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike,
mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def accumulateSquare(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def accumulateWeighted(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike,
+ alpha: float,
mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def accumulateWeighted(src: UMat, dst: UMat, alpha: float, mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def adaptiveThreshold(
src: cv2.typing.MatLike,
@@ -6126,8 +5356,6 @@ def adaptiveThreshold(
C: float,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def adaptiveThreshold(
src: UMat,
@@ -6138,19 +5366,16 @@ def adaptiveThreshold(
C: float,
dst: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def add(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- mask: cv2.typing.MatLike | None = ..., dtype: int = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ mask: cv2.typing.MatLike | None = ...,
+ dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def add(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ...
-
-
def addText(
img: cv2.typing.MatLike,
text: str,
@@ -6162,8 +5387,6 @@ def addText(
style: int = ...,
spacing: int = ...,
) -> None: ...
-
-
@typing.overload
def addWeighted(
src1: cv2.typing.MatLike,
@@ -6174,8 +5397,6 @@ def addWeighted(
dst: cv2.typing.MatLike | None = ...,
dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def addWeighted(
src1: UMat,
@@ -6186,48 +5407,35 @@ def addWeighted(
dst: UMat | None = ...,
dtype: int = ...,
) -> UMat: ...
-
-
@typing.overload
def applyColorMap(
src: cv2.typing.MatLike,
colormap: int,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def applyColorMap(src: UMat, colormap: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def applyColorMap(
- src: cv2.typing.MatLike, userColor: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ userColor: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def applyColorMap(src: UMat, userColor: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def approxPolyDP(
- curve: cv2.typing.MatLike, epsilon: float, closed: bool,
+ curve: cv2.typing.MatLike,
+ epsilon: float,
+ closed: bool,
approxCurve: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def approxPolyDP(curve: UMat, epsilon: float, closed: bool, approxCurve: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def arcLength(curve: cv2.typing.MatLike, closed: bool) -> float: ...
@typing.overload
def arcLength(curve: UMat, closed: bool) -> float: ...
-
-
@typing.overload
def arrowedLine(
img: cv2.typing.MatLike,
@@ -6239,8 +5447,6 @@ def arrowedLine(
shift: int = ...,
tipLength: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def arrowedLine(
img: UMat,
@@ -6252,8 +5458,6 @@ def arrowedLine(
shift: int = ...,
tipLength: float = ...,
) -> UMat: ...
-
-
@typing.overload
def batchDistance(
src1: cv2.typing.MatLike,
@@ -6270,8 +5474,6 @@ def batchDistance(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def batchDistance(
src1: UMat,
@@ -6288,8 +5490,6 @@ def batchDistance(
UMat,
UMat,
]: ...
-
-
@typing.overload
def bilateralFilter(
src: cv2.typing.MatLike,
@@ -6299,8 +5499,6 @@ def bilateralFilter(
dst: cv2.typing.MatLike | None = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def bilateralFilter(
src: UMat,
@@ -6310,70 +5508,59 @@ def bilateralFilter(
dst: UMat | None = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def bitwise_and(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike |
- None = ..., mask: cv2.typing.MatLike | None = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def bitwise_and(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def bitwise_not(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def bitwise_not(src: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def bitwise_or(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike |
- None = ..., mask: cv2.typing.MatLike | None = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def bitwise_or(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def bitwise_xor(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike |
- None = ..., mask: cv2.typing.MatLike | None = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def bitwise_xor(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def blendLinear(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, weights1: cv2.typing.MatLike,
- weights2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ weights1: cv2.typing.MatLike,
+ weights2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def blendLinear(src1: UMat, src2: UMat, weights1: UMat, weights2: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def blur(
- src: cv2.typing.MatLike, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ...,
- anchor: cv2.typing.Point = ..., borderType: int = ...,
+ src: cv2.typing.MatLike,
+ ksize: cv2.typing.Size,
+ dst: cv2.typing.MatLike | None = ...,
+ anchor: cv2.typing.Point = ...,
+ borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def blur(
src: UMat,
@@ -6382,17 +5569,11 @@ def blur(
anchor: cv2.typing.Point = ...,
borderType: int = ...,
) -> UMat: ...
-
-
def borderInterpolate(p: int, len: int, borderType: int) -> int: ...
-
-
@typing.overload
def boundingRect(array: cv2.typing.MatLike) -> cv2.typing.Rect: ...
@typing.overload
def boundingRect(array: UMat) -> cv2.typing.Rect: ...
-
-
@typing.overload
def boxFilter(
src: cv2.typing.MatLike,
@@ -6403,8 +5584,6 @@ def boxFilter(
normalize: bool = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def boxFilter(
src: UMat,
@@ -6415,14 +5594,10 @@ def boxFilter(
normalize: bool = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def boxPoints(box: cv2.typing.RotatedRect, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def boxPoints(box: cv2.typing.RotatedRect, points: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def buildOpticalFlowPyramid(
img: cv2.typing.MatLike,
@@ -6437,8 +5612,6 @@ def buildOpticalFlowPyramid(
int,
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def buildOpticalFlowPyramid(
img: UMat,
@@ -6453,8 +5626,6 @@ def buildOpticalFlowPyramid(
int,
typing.Sequence[UMat],
]: ...
-
-
@typing.overload
def calcBackProject(
images: typing.Sequence[cv2.typing.MatLike],
@@ -6464,8 +5635,6 @@ def calcBackProject(
scale: float,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def calcBackProject(
images: typing.Sequence[UMat],
@@ -6475,8 +5644,6 @@ def calcBackProject(
scale: float,
dst: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def calcCovarMatrix(
samples: cv2.typing.MatLike,
@@ -6488,8 +5655,6 @@ def calcCovarMatrix(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calcCovarMatrix(
samples: UMat,
@@ -6501,8 +5666,6 @@ def calcCovarMatrix(
UMat,
UMat,
]: ...
-
-
@typing.overload
def calcHist(
images: typing.Sequence[cv2.typing.MatLike],
@@ -6513,8 +5676,6 @@ def calcHist(
hist: cv2.typing.MatLike | None = ...,
accumulate: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def calcHist(
images: typing.Sequence[UMat],
@@ -6525,8 +5686,6 @@ def calcHist(
hist: UMat | None = ...,
accumulate: bool = ...,
) -> UMat: ...
-
-
@typing.overload
def calcOpticalFlowFarneback(
prev: cv2.typing.MatLike,
@@ -6540,8 +5699,6 @@ def calcOpticalFlowFarneback(
poly_sigma: float,
flags: int,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def calcOpticalFlowFarneback(
prev: UMat,
@@ -6555,8 +5712,6 @@ def calcOpticalFlowFarneback(
poly_sigma: float,
flags: int,
) -> UMat: ...
-
-
@typing.overload
def calcOpticalFlowPyrLK(
prevImg: cv2.typing.MatLike,
@@ -6575,8 +5730,6 @@ def calcOpticalFlowPyrLK(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calcOpticalFlowPyrLK(
prevImg: UMat,
@@ -6595,8 +5748,6 @@ def calcOpticalFlowPyrLK(
UMat,
UMat,
]: ...
-
-
@typing.overload
def calibrateCamera(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -6615,8 +5766,6 @@ def calibrateCamera(
typing.Sequence[cv2.typing.MatLike],
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def calibrateCamera(
objectPoints: typing.Sequence[UMat],
@@ -6635,8 +5784,6 @@ def calibrateCamera(
typing.Sequence[UMat],
typing.Sequence[UMat],
]: ...
-
-
@typing.overload
def calibrateCameraExtended(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -6661,8 +5808,6 @@ def calibrateCameraExtended(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calibrateCameraExtended(
objectPoints: typing.Sequence[UMat],
@@ -6687,8 +5832,6 @@ def calibrateCameraExtended(
UMat,
UMat,
]: ...
-
-
@typing.overload
def calibrateCameraRO(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -6710,8 +5853,6 @@ def calibrateCameraRO(
typing.Sequence[cv2.typing.MatLike],
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calibrateCameraRO(
objectPoints: typing.Sequence[UMat],
@@ -6733,8 +5874,6 @@ def calibrateCameraRO(
typing.Sequence[UMat],
UMat,
]: ...
-
-
@typing.overload
def calibrateCameraROExtended(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -6764,8 +5903,6 @@ def calibrateCameraROExtended(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calibrateCameraROExtended(
objectPoints: typing.Sequence[UMat],
@@ -6795,8 +5932,6 @@ def calibrateCameraROExtended(
UMat,
UMat,
]: ...
-
-
@typing.overload
def calibrateHandEye(
R_gripper2base: typing.Sequence[cv2.typing.MatLike],
@@ -6810,8 +5945,6 @@ def calibrateHandEye(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calibrateHandEye(
R_gripper2base: typing.Sequence[UMat],
@@ -6825,8 +5958,6 @@ def calibrateHandEye(
UMat,
UMat,
]: ...
-
-
@typing.overload
def calibrateRobotWorldHandEye(
R_world2cam: typing.Sequence[cv2.typing.MatLike],
@@ -6844,8 +5975,6 @@ def calibrateRobotWorldHandEye(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def calibrateRobotWorldHandEye(
R_world2cam: typing.Sequence[UMat],
@@ -6863,8 +5992,6 @@ def calibrateRobotWorldHandEye(
UMat,
UMat,
]: ...
-
-
@typing.overload
def calibrationMatrixValues(
cameraMatrix: cv2.typing.MatLike,
@@ -6878,8 +6005,6 @@ def calibrationMatrixValues(
cv2.typing.Point2d,
float,
]: ...
-
-
@typing.overload
def calibrationMatrixValues(
cameraMatrix: UMat,
@@ -6893,8 +6018,6 @@ def calibrationMatrixValues(
cv2.typing.Point2d,
float,
]: ...
-
-
@typing.overload
def cartToPolar(
x: cv2.typing.MatLike,
@@ -6906,8 +6029,6 @@ def cartToPolar(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def cartToPolar(
x: UMat,
@@ -6919,17 +6040,11 @@ def cartToPolar(
UMat,
UMat,
]: ...
-
-
@typing.overload
def checkChessboard(img: cv2.typing.MatLike, size: cv2.typing.Size) -> bool: ...
@typing.overload
def checkChessboard(img: UMat, size: cv2.typing.Size) -> bool: ...
-
-
def checkHardwareSupport(feature: int) -> bool: ...
-
-
@typing.overload
def checkRange(
a: cv2.typing.MatLike,
@@ -6940,8 +6055,6 @@ def checkRange(
bool,
cv2.typing.Point,
]: ...
-
-
@typing.overload
def checkRange(
a: UMat,
@@ -6952,15 +6065,16 @@ def checkRange(
bool,
cv2.typing.Point,
]: ...
-
-
@typing.overload
def circle(
- img: cv2.typing.MatLike, center: cv2.typing.Point, radius: int, color: cv2.typing.Scalar,
- thickness: int = ..., lineType: int = ..., shift: int = ...,
-) -> cv2.typing.MatLike: ...
-
-
+ img: cv2.typing.MatLike,
+ center: cv2.typing.Point,
+ radius: int,
+ color: cv2.typing.Scalar,
+ thickness: int = ...,
+ lineType: int = ...,
+ shift: int = ...,
+) -> cv2.typing.MatLike: ...
@typing.overload
def circle(
img: UMat,
@@ -6971,8 +6085,6 @@ def circle(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
def clipLine(
imgRect: cv2.typing.Rect,
pt1: cv2.typing.Point,
@@ -6982,15 +6094,15 @@ def clipLine(
cv2.typing.Point,
cv2.typing.Point,
]: ...
-
-
@typing.overload
def colorChange(
- src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- red_mul: float = ..., green_mul: float = ..., blue_mul: float = ...,
+ src: cv2.typing.MatLike,
+ mask: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ red_mul: float = ...,
+ green_mul: float = ...,
+ blue_mul: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def colorChange(
src: UMat,
@@ -7000,31 +6112,23 @@ def colorChange(
green_mul: float = ...,
blue_mul: float = ...,
) -> UMat: ...
-
-
@typing.overload
def compare(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, cmpop: int,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ cmpop: int,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def compare(src1: UMat, src2: UMat, cmpop: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def compareHist(H1: cv2.typing.MatLike, H2: cv2.typing.MatLike, method: int) -> float: ...
@typing.overload
def compareHist(H1: UMat, H2: UMat, method: int) -> float: ...
-
-
@typing.overload
def completeSymm(m: cv2.typing.MatLike, lowerToUpper: bool = ...) -> cv2.typing.MatLike: ...
@typing.overload
def completeSymm(m: UMat, lowerToUpper: bool = ...) -> UMat: ...
-
-
@typing.overload
def composeRT(
rvec1: cv2.typing.MatLike,
@@ -7053,8 +6157,6 @@ def composeRT(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def composeRT(
rvec1: UMat,
@@ -7083,8 +6185,6 @@ def composeRT(
UMat,
UMat,
]: ...
-
-
@typing.overload
def computeCorrespondEpilines(
points: cv2.typing.MatLike,
@@ -7092,23 +6192,16 @@ def computeCorrespondEpilines(
F: cv2.typing.MatLike,
lines: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def computeCorrespondEpilines(points: UMat, whichImage: int, F: UMat, lines: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def computeECC(
- templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike,
+ templateImage: cv2.typing.MatLike,
+ inputImage: cv2.typing.MatLike,
inputMask: cv2.typing.MatLike | None = ...,
) -> float: ...
-
-
@typing.overload
def computeECC(templateImage: UMat, inputImage: UMat, inputMask: UMat | None = ...) -> float: ...
-
-
@typing.overload
def connectedComponents(
image: cv2.typing.MatLike,
@@ -7119,8 +6212,6 @@ def connectedComponents(
int,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def connectedComponents(
image: UMat,
@@ -7131,8 +6222,6 @@ def connectedComponents(
int,
UMat,
]: ...
-
-
@typing.overload
def connectedComponentsWithAlgorithm(
image: cv2.typing.MatLike,
@@ -7144,8 +6233,6 @@ def connectedComponentsWithAlgorithm(
int,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def connectedComponentsWithAlgorithm(
image: UMat,
@@ -7157,8 +6244,6 @@ def connectedComponentsWithAlgorithm(
int,
UMat,
]: ...
-
-
@typing.overload
def connectedComponentsWithStats(
image: cv2.typing.MatLike,
@@ -7173,8 +6258,6 @@ def connectedComponentsWithStats(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def connectedComponentsWithStats(
image: UMat,
@@ -7189,8 +6272,6 @@ def connectedComponentsWithStats(
UMat,
UMat,
]: ...
-
-
@typing.overload
def connectedComponentsWithStatsWithAlgorithm(
image: cv2.typing.MatLike,
@@ -7206,8 +6287,6 @@ def connectedComponentsWithStatsWithAlgorithm(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def connectedComponentsWithStatsWithAlgorithm(
image: UMat,
@@ -7223,20 +6302,14 @@ def connectedComponentsWithStatsWithAlgorithm(
UMat,
UMat,
]: ...
-
-
@typing.overload
def contourArea(contour: cv2.typing.MatLike, oriented: bool = ...) -> float: ...
@typing.overload
def contourArea(contour: UMat, oriented: bool = ...) -> float: ...
-
-
@typing.overload
def convertFp16(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def convertFp16(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def convertMaps(
map1: cv2.typing.MatLike,
@@ -7249,8 +6322,6 @@ def convertMaps(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def convertMaps(
map1: UMat,
@@ -7263,58 +6334,43 @@ def convertMaps(
UMat,
UMat,
]: ...
-
-
@typing.overload
def convertPointsFromHomogeneous(
src: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def convertPointsFromHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def convertPointsToHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def convertPointsToHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def convertScaleAbs(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- alpha: float = ..., beta: float = ...,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ alpha: float = ...,
+ beta: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def convertScaleAbs(src: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ...
-
-
@typing.overload
def convexHull(
- points: cv2.typing.MatLike, hull: cv2.typing.MatLike | None = ...,
- clockwise: bool = ..., returnPoints: bool = ...,
+ points: cv2.typing.MatLike,
+ hull: cv2.typing.MatLike | None = ...,
+ clockwise: bool = ...,
+ returnPoints: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def convexHull(points: UMat, hull: UMat | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> UMat: ...
-
-
@typing.overload
def convexityDefects(
- contour: cv2.typing.MatLike, convexhull: cv2.typing.MatLike,
+ contour: cv2.typing.MatLike,
+ convexhull: cv2.typing.MatLike,
convexityDefects: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def convexityDefects(contour: UMat, convexhull: UMat, convexityDefects: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def copyMakeBorder(
src: cv2.typing.MatLike,
@@ -7326,26 +6382,25 @@ def copyMakeBorder(
dst: cv2.typing.MatLike | None = ...,
value: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def copyMakeBorder(
- src: UMat, top: int, bottom: int, left: int, right: int, borderType: int,
- dst: UMat | None = ..., value: cv2.typing.Scalar = ...,
+ src: UMat,
+ top: int,
+ bottom: int,
+ left: int,
+ right: int,
+ borderType: int,
+ dst: UMat | None = ...,
+ value: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
@typing.overload
def copyTo(
- src: cv2.typing.MatLike, mask: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ mask: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def copyTo(src: UMat, mask: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def cornerEigenValsAndVecs(
src: cv2.typing.MatLike,
@@ -7354,8 +6409,6 @@ def cornerEigenValsAndVecs(
dst: cv2.typing.MatLike | None = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def cornerEigenValsAndVecs(
src: UMat,
@@ -7364,8 +6417,6 @@ def cornerEigenValsAndVecs(
dst: UMat | None = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def cornerHarris(
src: cv2.typing.MatLike,
@@ -7375,8 +6426,6 @@ def cornerHarris(
dst: cv2.typing.MatLike | None = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def cornerHarris(
src: UMat,
@@ -7386,8 +6435,6 @@ def cornerHarris(
dst: UMat | None = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def cornerMinEigenVal(
src: cv2.typing.MatLike,
@@ -7396,8 +6443,6 @@ def cornerMinEigenVal(
ksize: int = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def cornerMinEigenVal(
src: UMat,
@@ -7406,15 +6451,14 @@ def cornerMinEigenVal(
ksize: int = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def cornerSubPix(
- image: cv2.typing.MatLike, corners: cv2.typing.MatLike, winSize: cv2.typing.Size,
- zeroZone: cv2.typing.Size, criteria: cv2.typing.TermCriteria,
+ image: cv2.typing.MatLike,
+ corners: cv2.typing.MatLike,
+ winSize: cv2.typing.Size,
+ zeroZone: cv2.typing.Size,
+ criteria: cv2.typing.TermCriteria,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def cornerSubPix(
image: UMat,
@@ -7423,8 +6467,6 @@ def cornerSubPix(
zeroZone: cv2.typing.Size,
criteria: cv2.typing.TermCriteria,
) -> UMat: ...
-
-
@typing.overload
def correctMatches(
F: cv2.typing.MatLike,
@@ -7436,8 +6478,6 @@ def correctMatches(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def correctMatches(
F: UMat,
@@ -7449,56 +6489,34 @@ def correctMatches(
UMat,
UMat,
]: ...
-
-
@typing.overload
def countNonZero(src: cv2.typing.MatLike) -> int: ...
@typing.overload
def countNonZero(src: UMat) -> int: ...
-
-
def createAlignMTB(max_bits: int = ..., exclude_range: int = ..., cut: bool = ...) -> AlignMTB: ...
-
-
def createBackgroundSubtractorKNN(
- history: int = ..., dist2Threshold: float = ...,
+ history: int = ...,
+ dist2Threshold: float = ...,
detectShadows: bool = ...,
) -> BackgroundSubtractorKNN: ...
-
-
def createBackgroundSubtractorMOG2(
- history: int = ..., varThreshold: float = ...,
+ history: int = ...,
+ varThreshold: float = ...,
detectShadows: bool = ...,
) -> BackgroundSubtractorMOG2: ...
-
-
def createCLAHE(clipLimit: float = ..., tileGridSize: cv2.typing.Size = ...) -> CLAHE: ...
-
-
def createCalibrateDebevec(samples: int = ..., lambda_: float = ..., random: bool = ...) -> CalibrateDebevec: ...
-
-
def createCalibrateRobertson(max_iter: int = ..., threshold: float = ...) -> CalibrateRobertson: ...
-
-
def createGeneralizedHoughBallard() -> GeneralizedHoughBallard: ...
-
-
def createGeneralizedHoughGuil() -> GeneralizedHoughGuil: ...
-
-
@typing.overload
def createHanningWindow(
winSize: cv2.typing.Size,
type: int,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: UMat | None = ...) -> UMat: ...
-
-
def createLineSegmentDetector(
refine: int = ...,
scale: float = ...,
@@ -7509,69 +6527,45 @@ def createLineSegmentDetector(
density_th: float = ...,
n_bins: int = ...,
) -> LineSegmentDetector: ...
-
-
def createMergeDebevec() -> MergeDebevec: ...
-
-
def createMergeMertens(
contrast_weight: float = ...,
saturation_weight: float = ...,
exposure_weight: float = ...,
) -> MergeMertens: ...
-
-
def createMergeRobertson() -> MergeRobertson: ...
-
-
def createTonemap(gamma: float = ...) -> Tonemap: ...
-
-
def createTonemapDrago(gamma: float = ..., saturation: float = ..., bias: float = ...) -> TonemapDrago: ...
-
-
def createTonemapMantiuk(gamma: float = ..., scale: float = ..., saturation: float = ...) -> TonemapMantiuk: ...
-
-
def createTonemapReinhard(
gamma: float = ...,
intensity: float = ...,
light_adapt: float = ...,
color_adapt: float = ...,
) -> TonemapReinhard: ...
-
-
def cubeRoot(val: float) -> float: ...
-
-
@typing.overload
def cvtColor(
- src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike |
- None = ..., dstCn: int = ...,
+ src: cv2.typing.MatLike,
+ code: int,
+ dst: cv2.typing.MatLike | None = ...,
+ dstCn: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def cvtColor(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ...
-
-
@typing.overload
def cvtColorTwoPlane(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, code: int,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ code: int,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def cvtColorTwoPlane(src1: UMat, src2: UMat, code: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def dct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
@typing.overload
def dct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ...
-
-
@typing.overload
def decolor(
src: cv2.typing.MatLike,
@@ -7581,12 +6575,8 @@ def decolor(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def decolor(src: UMat, grayscale: UMat | None = ..., color_boost: UMat | None = ...) -> tuple[UMat, UMat]: ...
-
-
@typing.overload
def decomposeEssentialMat(
E: cv2.typing.MatLike,
@@ -7598,8 +6588,6 @@ def decomposeEssentialMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def decomposeEssentialMat(
E: UMat,
@@ -7611,8 +6599,6 @@ def decomposeEssentialMat(
UMat,
UMat,
]: ...
-
-
@typing.overload
def decomposeHomographyMat(
H: cv2.typing.MatLike,
@@ -7626,8 +6612,6 @@ def decomposeHomographyMat(
typing.Sequence[cv2.typing.MatLike],
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def decomposeHomographyMat(
H: UMat,
@@ -7641,8 +6625,6 @@ def decomposeHomographyMat(
typing.Sequence[UMat],
typing.Sequence[UMat],
]: ...
-
-
@typing.overload
def decomposeProjectionMatrix(
projMatrix: cv2.typing.MatLike,
@@ -7662,8 +6644,6 @@ def decomposeProjectionMatrix(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def decomposeProjectionMatrix(
projMatrix: UMat,
@@ -7683,33 +6663,23 @@ def decomposeProjectionMatrix(
UMat,
UMat,
]: ...
-
-
@typing.overload
def demosaicing(
- src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike |
- None = ..., dstCn: int = ...,
+ src: cv2.typing.MatLike,
+ code: int,
+ dst: cv2.typing.MatLike | None = ...,
+ dstCn: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def demosaicing(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ...
-
-
def denoise_TVL1(
observations: typing.Sequence[cv2.typing.MatLike],
result: cv2.typing.MatLike,
lambda_: float = ...,
niters: int = ...,
) -> None: ...
-
-
def destroyAllWindows() -> None: ...
-
-
def destroyWindow(winname: str) -> None: ...
-
-
@typing.overload
def detailEnhance(
src: cv2.typing.MatLike,
@@ -7717,29 +6687,21 @@ def detailEnhance(
sigma_s: float = ...,
sigma_r: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def detailEnhance(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ...
-
-
@typing.overload
def determinant(mtx: cv2.typing.MatLike) -> float: ...
@typing.overload
def determinant(mtx: UMat) -> float: ...
-
-
@typing.overload
def dft(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- flags: int = ..., nonzeroRows: int = ...,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ flags: int = ...,
+ nonzeroRows: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def dft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ...
-
-
@typing.overload
def dilate(
src: cv2.typing.MatLike,
@@ -7750,8 +6712,6 @@ def dilate(
borderType: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def dilate(
src: UMat,
@@ -7762,14 +6722,8 @@ def dilate(
borderType: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
def displayOverlay(winname: str, text: str, delayms: int = ...) -> None: ...
-
-
def displayStatusBar(winname: str, text: str, delayms: int = ...) -> None: ...
-
-
@typing.overload
def distanceTransform(
src: cv2.typing.MatLike,
@@ -7778,8 +6732,6 @@ def distanceTransform(
dst: cv2.typing.MatLike | None = ...,
dstType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def distanceTransform(
src: UMat,
@@ -7788,8 +6740,6 @@ def distanceTransform(
dst: UMat | None = ...,
dstType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def distanceTransformWithLabels(
src: cv2.typing.MatLike,
@@ -7802,8 +6752,6 @@ def distanceTransformWithLabels(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def distanceTransformWithLabels(
src: UMat,
@@ -7816,8 +6764,6 @@ def distanceTransformWithLabels(
UMat,
UMat,
]: ...
-
-
@typing.overload
def divSpectrums(
a: cv2.typing.MatLike,
@@ -7826,34 +6772,27 @@ def divSpectrums(
c: cv2.typing.MatLike | None = ...,
conjB: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def divSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ...
-
-
@typing.overload
def divide(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike |
- None = ..., scale: float = ..., dtype: int = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ scale: float = ...,
+ dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def divide(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ...
-
-
@typing.overload
def divide(
- scale: float, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike |
- None = ..., dtype: int = ...,
+ scale: float,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def divide(scale: float, src2: UMat, dst: UMat | None = ..., dtype: int = ...) -> UMat: ...
-
-
@typing.overload
def drawChessboardCorners(
image: cv2.typing.MatLike,
@@ -7861,12 +6800,8 @@ def drawChessboardCorners(
corners: cv2.typing.MatLike,
patternWasFound: bool,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat, patternWasFound: bool) -> UMat: ...
-
-
@typing.overload
def drawContours(
image: cv2.typing.MatLike,
@@ -7879,8 +6814,6 @@ def drawContours(
maxLevel: int = ...,
offset: cv2.typing.Point = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawContours(
image: UMat,
@@ -7893,8 +6826,6 @@ def drawContours(
maxLevel: int = ...,
offset: cv2.typing.Point = ...,
) -> UMat: ...
-
-
@typing.overload
def drawFrameAxes(
image: cv2.typing.MatLike,
@@ -7905,8 +6836,6 @@ def drawFrameAxes(
length: float,
thickness: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawFrameAxes(
image: UMat,
@@ -7917,8 +6846,6 @@ def drawFrameAxes(
length: float,
thickness: int = ...,
) -> UMat: ...
-
-
@typing.overload
def drawKeypoints(
image: cv2.typing.MatLike,
@@ -7927,8 +6854,6 @@ def drawKeypoints(
color: cv2.typing.Scalar = ...,
flags: DrawMatchesFlags = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawKeypoints(
image: UMat,
@@ -7937,8 +6862,6 @@ def drawKeypoints(
color: cv2.typing.Scalar = ...,
flags: DrawMatchesFlags = ...,
) -> UMat: ...
-
-
@typing.overload
def drawMarker(
img: cv2.typing.MatLike,
@@ -7949,8 +6872,6 @@ def drawMarker(
thickness: int = ...,
line_type: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawMarker(
img: UMat,
@@ -7961,8 +6882,6 @@ def drawMarker(
thickness: int = ...,
line_type: int = ...,
) -> UMat: ...
-
-
@typing.overload
def drawMatches(
img1: cv2.typing.MatLike,
@@ -7976,8 +6895,6 @@ def drawMatches(
matchesMask: typing.Sequence[str] = ...,
flags: DrawMatchesFlags = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawMatches(
img1: UMat,
@@ -7991,8 +6908,6 @@ def drawMatches(
matchesMask: typing.Sequence[str] = ...,
flags: DrawMatchesFlags = ...,
) -> UMat: ...
-
-
@typing.overload
def drawMatches(
img1: cv2.typing.MatLike,
@@ -8007,8 +6922,6 @@ def drawMatches(
matchesMask: typing.Sequence[str] = ...,
flags: DrawMatchesFlags = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawMatches(
img1: UMat,
@@ -8023,8 +6936,6 @@ def drawMatches(
matchesMask: typing.Sequence[str] = ...,
flags: DrawMatchesFlags = ...,
) -> UMat: ...
-
-
@typing.overload
def drawMatchesKnn(
img1: cv2.typing.MatLike,
@@ -8038,8 +6949,6 @@ def drawMatchesKnn(
matchesMask: typing.Sequence[typing.Sequence[str]] = ...,
flags: DrawMatchesFlags = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def drawMatchesKnn(
img1: UMat,
@@ -8053,8 +6962,6 @@ def drawMatchesKnn(
matchesMask: typing.Sequence[typing.Sequence[str]] = ...,
flags: DrawMatchesFlags = ...,
) -> UMat: ...
-
-
@typing.overload
def edgePreservingFilter(
src: cv2.typing.MatLike,
@@ -8063,8 +6970,6 @@ def edgePreservingFilter(
sigma_s: float = ...,
sigma_r: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def edgePreservingFilter(
src: UMat,
@@ -8073,8 +6978,6 @@ def edgePreservingFilter(
sigma_s: float = ...,
sigma_r: float = ...,
) -> UMat: ...
-
-
@typing.overload
def eigen(
src: cv2.typing.MatLike,
@@ -8085,12 +6988,8 @@ def eigen(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def eigen(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[bool, UMat, UMat]: ...
-
-
@typing.overload
def eigenNonSymmetric(
src: cv2.typing.MatLike,
@@ -8100,8 +6999,6 @@ def eigenNonSymmetric(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def eigenNonSymmetric(
src: UMat,
@@ -8111,8 +7008,6 @@ def eigenNonSymmetric(
UMat,
UMat,
]: ...
-
-
@typing.overload
def ellipse(
img: cv2.typing.MatLike,
@@ -8126,8 +7021,6 @@ def ellipse(
lineType: int = ...,
shift: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def ellipse(
img: UMat,
@@ -8141,8 +7034,6 @@ def ellipse(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
@typing.overload
def ellipse(
img: cv2.typing.MatLike,
@@ -8151,8 +7042,6 @@ def ellipse(
thickness: int = ...,
lineType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def ellipse(
img: UMat,
@@ -8161,29 +7050,21 @@ def ellipse(
thickness: int = ...,
lineType: int = ...,
) -> UMat: ...
-
-
def ellipse2Poly(
- center: cv2.typing.Point, axes: cv2.typing.Size, angle: int, arcStart: int,
- arcEnd: int, delta: int,
+ center: cv2.typing.Point,
+ axes: cv2.typing.Size,
+ angle: int,
+ arcStart: int,
+ arcEnd: int,
+ delta: int,
) -> typing.Sequence[cv2.typing.Point]: ...
-
-
def empty_array_desc() -> GArrayDesc: ...
-
-
def empty_gopaque_desc() -> GOpaqueDesc: ...
-
-
def empty_scalar_desc() -> GScalarDesc: ...
-
-
@typing.overload
def equalizeHist(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def equalizeHist(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def erode(
src: cv2.typing.MatLike,
@@ -8194,8 +7075,6 @@ def erode(
borderType: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def erode(
src: UMat,
@@ -8206,8 +7085,6 @@ def erode(
borderType: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
@typing.overload
def estimateAffine2D(
from_: cv2.typing.MatLike,
@@ -8222,8 +7099,6 @@ def estimateAffine2D(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def estimateAffine2D(
from_: UMat,
@@ -8238,8 +7113,6 @@ def estimateAffine2D(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def estimateAffine2D(
pts1: cv2.typing.MatLike,
@@ -8250,8 +7123,6 @@ def estimateAffine2D(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def estimateAffine2D(
pts1: UMat,
@@ -8262,8 +7133,6 @@ def estimateAffine2D(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def estimateAffine3D(
src: cv2.typing.MatLike,
@@ -8277,8 +7146,6 @@ def estimateAffine3D(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def estimateAffine3D(
src: UMat,
@@ -8292,8 +7159,6 @@ def estimateAffine3D(
UMat,
UMat,
]: ...
-
-
@typing.overload
def estimateAffine3D(
src: cv2.typing.MatLike,
@@ -8303,12 +7168,8 @@ def estimateAffine3D(
cv2.typing.MatLike,
float,
]: ...
-
-
@typing.overload
def estimateAffine3D(src: UMat, dst: UMat, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ...
-
-
@typing.overload
def estimateAffinePartial2D(
from_: cv2.typing.MatLike,
@@ -8323,8 +7184,6 @@ def estimateAffinePartial2D(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def estimateAffinePartial2D(
from_: UMat,
@@ -8339,8 +7198,6 @@ def estimateAffinePartial2D(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def estimateChessboardSharpness(
image: cv2.typing.MatLike,
@@ -8353,8 +7210,6 @@ def estimateChessboardSharpness(
cv2.typing.Scalar,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def estimateChessboardSharpness(
image: UMat,
@@ -8367,8 +7222,6 @@ def estimateChessboardSharpness(
cv2.typing.Scalar,
UMat,
]: ...
-
-
@typing.overload
def estimateTranslation3D(
src: cv2.typing.MatLike,
@@ -8382,8 +7235,6 @@ def estimateTranslation3D(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def estimateTranslation3D(
src: UMat,
@@ -8397,23 +7248,15 @@ def estimateTranslation3D(
UMat,
UMat,
]: ...
-
-
@typing.overload
def exp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def exp(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def extractChannel(src: cv2.typing.MatLike, coi: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def extractChannel(src: UMat, coi: int, dst: UMat | None = ...) -> UMat: ...
-
-
def fastAtan2(y: float, x: float) -> float: ...
-
-
@typing.overload
def fastNlMeansDenoising(
src: cv2.typing.MatLike,
@@ -8422,8 +7265,6 @@ def fastNlMeansDenoising(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fastNlMeansDenoising(
src: UMat,
@@ -8432,8 +7273,6 @@ def fastNlMeansDenoising(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fastNlMeansDenoising(
src: cv2.typing.MatLike,
@@ -8443,8 +7282,6 @@ def fastNlMeansDenoising(
searchWindowSize: int = ...,
normType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fastNlMeansDenoising(
src: UMat,
@@ -8454,8 +7291,6 @@ def fastNlMeansDenoising(
searchWindowSize: int = ...,
normType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fastNlMeansDenoisingColored(
src: cv2.typing.MatLike,
@@ -8465,8 +7300,6 @@ def fastNlMeansDenoisingColored(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fastNlMeansDenoisingColored(
src: UMat,
@@ -8476,8 +7309,6 @@ def fastNlMeansDenoisingColored(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fastNlMeansDenoisingColoredMulti(
srcImgs: typing.Sequence[cv2.typing.MatLike],
@@ -8489,8 +7320,6 @@ def fastNlMeansDenoisingColoredMulti(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fastNlMeansDenoisingColoredMulti(
srcImgs: typing.Sequence[UMat],
@@ -8502,8 +7331,6 @@ def fastNlMeansDenoisingColoredMulti(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fastNlMeansDenoisingMulti(
srcImgs: typing.Sequence[cv2.typing.MatLike],
@@ -8514,8 +7341,6 @@ def fastNlMeansDenoisingMulti(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fastNlMeansDenoisingMulti(
srcImgs: typing.Sequence[UMat],
@@ -8526,8 +7351,6 @@ def fastNlMeansDenoisingMulti(
templateWindowSize: int = ...,
searchWindowSize: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fastNlMeansDenoisingMulti(
srcImgs: typing.Sequence[cv2.typing.MatLike],
@@ -8539,8 +7362,6 @@ def fastNlMeansDenoisingMulti(
searchWindowSize: int = ...,
normType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fastNlMeansDenoisingMulti(
srcImgs: typing.Sequence[UMat],
@@ -8552,8 +7373,6 @@ def fastNlMeansDenoisingMulti(
searchWindowSize: int = ...,
normType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fillConvexPoly(
img: cv2.typing.MatLike,
@@ -8562,8 +7381,6 @@ def fillConvexPoly(
lineType: int = ...,
shift: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fillConvexPoly(
img: UMat,
@@ -8572,8 +7389,6 @@ def fillConvexPoly(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
@typing.overload
def fillPoly(
img: cv2.typing.MatLike,
@@ -8583,8 +7398,6 @@ def fillPoly(
shift: int = ...,
offset: cv2.typing.Point = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fillPoly(
img: UMat,
@@ -8594,8 +7407,6 @@ def fillPoly(
shift: int = ...,
offset: cv2.typing.Point = ...,
) -> UMat: ...
-
-
@typing.overload
def filter2D(
src: cv2.typing.MatLike,
@@ -8606,8 +7417,6 @@ def filter2D(
delta: float = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def filter2D(
src: UMat,
@@ -8618,8 +7427,6 @@ def filter2D(
delta: float = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def filterHomographyDecompByVisibleRefpoints(
rotations: typing.Sequence[cv2.typing.MatLike],
@@ -8629,8 +7436,6 @@ def filterHomographyDecompByVisibleRefpoints(
possibleSolutions: cv2.typing.MatLike | None = ...,
pointsMask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def filterHomographyDecompByVisibleRefpoints(
rotations: typing.Sequence[UMat],
@@ -8640,8 +7445,6 @@ def filterHomographyDecompByVisibleRefpoints(
possibleSolutions: UMat | None = ...,
pointsMask: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def filterSpeckles(
img: cv2.typing.MatLike,
@@ -8653,8 +7456,6 @@ def filterSpeckles(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def filterSpeckles(
img: UMat,
@@ -8666,8 +7467,6 @@ def filterSpeckles(
UMat,
UMat,
]: ...
-
-
@typing.overload
def find4QuadCornerSubpix(
img: cv2.typing.MatLike,
@@ -8677,12 +7476,8 @@ def find4QuadCornerSubpix(
bool,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def find4QuadCornerSubpix(img: UMat, corners: UMat, region_size: cv2.typing.Size) -> tuple[bool, UMat]: ...
-
-
@typing.overload
def findChessboardCorners(
image: cv2.typing.MatLike,
@@ -8693,8 +7488,6 @@ def findChessboardCorners(
bool,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findChessboardCorners(
image: UMat,
@@ -8705,8 +7498,6 @@ def findChessboardCorners(
bool,
UMat,
]: ...
-
-
@typing.overload
def findChessboardCornersSB(
image: cv2.typing.MatLike,
@@ -8717,8 +7508,6 @@ def findChessboardCornersSB(
bool,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findChessboardCornersSB(
image: UMat,
@@ -8729,8 +7518,6 @@ def findChessboardCornersSB(
bool,
UMat,
]: ...
-
-
@typing.overload
def findChessboardCornersSBWithMeta(
image: cv2.typing.MatLike,
@@ -8743,8 +7530,6 @@ def findChessboardCornersSBWithMeta(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findChessboardCornersSBWithMeta(
image: UMat,
@@ -8757,8 +7542,6 @@ def findChessboardCornersSBWithMeta(
UMat,
UMat,
]: ...
-
-
@typing.overload
def findCirclesGrid(
image: cv2.typing.MatLike,
@@ -8771,8 +7554,6 @@ def findCirclesGrid(
bool,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findCirclesGrid(
image: UMat,
@@ -8785,8 +7566,6 @@ def findCirclesGrid(
bool,
UMat,
]: ...
-
-
@typing.overload
def findCirclesGrid(
image: cv2.typing.MatLike,
@@ -8798,8 +7577,6 @@ def findCirclesGrid(
bool,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findCirclesGrid(
image: UMat,
@@ -8811,8 +7588,6 @@ def findCirclesGrid(
bool,
UMat,
]: ...
-
-
@typing.overload
def findContours(
image: cv2.typing.MatLike,
@@ -8825,8 +7600,6 @@ def findContours(
typing.Sequence[cv2.typing.MatLike],
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findContours(
image: UMat,
@@ -8839,8 +7612,6 @@ def findContours(
typing.Sequence[UMat],
UMat,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: cv2.typing.MatLike,
@@ -8855,8 +7626,6 @@ def findEssentialMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: UMat,
@@ -8871,8 +7640,6 @@ def findEssentialMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: cv2.typing.MatLike,
@@ -8888,8 +7655,6 @@ def findEssentialMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: UMat,
@@ -8905,8 +7670,6 @@ def findEssentialMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: cv2.typing.MatLike,
@@ -8923,8 +7686,6 @@ def findEssentialMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: UMat,
@@ -8941,8 +7702,6 @@ def findEssentialMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: cv2.typing.MatLike,
@@ -8957,8 +7716,6 @@ def findEssentialMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findEssentialMat(
points1: UMat,
@@ -8973,8 +7730,6 @@ def findEssentialMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findFundamentalMat(
points1: cv2.typing.MatLike,
@@ -8988,8 +7743,6 @@ def findFundamentalMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findFundamentalMat(
points1: UMat,
@@ -9003,8 +7756,6 @@ def findFundamentalMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findFundamentalMat(
points1: cv2.typing.MatLike,
@@ -9017,8 +7768,6 @@ def findFundamentalMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findFundamentalMat(
points1: UMat,
@@ -9031,8 +7780,6 @@ def findFundamentalMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findFundamentalMat(
points1: cv2.typing.MatLike,
@@ -9043,8 +7790,6 @@ def findFundamentalMat(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findFundamentalMat(
points1: UMat,
@@ -9055,8 +7800,6 @@ def findFundamentalMat(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findHomography(
srcPoints: cv2.typing.MatLike,
@@ -9070,8 +7813,6 @@ def findHomography(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findHomography(
srcPoints: UMat,
@@ -9085,8 +7826,6 @@ def findHomography(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findHomography(
srcPoints: cv2.typing.MatLike,
@@ -9097,8 +7836,6 @@ def findHomography(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findHomography(
srcPoints: UMat,
@@ -9109,14 +7846,10 @@ def findHomography(
cv2.typing.MatLike,
UMat,
]: ...
-
-
@typing.overload
def findNonZero(src: cv2.typing.MatLike, idx: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def findNonZero(src: UMat, idx: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def findTransformECC(
templateImage: cv2.typing.MatLike,
@@ -9130,8 +7863,6 @@ def findTransformECC(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findTransformECC(
templateImage: UMat,
@@ -9145,8 +7876,6 @@ def findTransformECC(
float,
UMat,
]: ...
-
-
@typing.overload
def findTransformECC(
templateImage: cv2.typing.MatLike,
@@ -9159,8 +7888,6 @@ def findTransformECC(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def findTransformECC(
templateImage: UMat,
@@ -9173,49 +7900,37 @@ def findTransformECC(
float,
UMat,
]: ...
-
-
@typing.overload
def fitEllipse(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@typing.overload
def fitEllipse(points: UMat) -> cv2.typing.RotatedRect: ...
-
-
@typing.overload
def fitEllipseAMS(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@typing.overload
def fitEllipseAMS(points: UMat) -> cv2.typing.RotatedRect: ...
-
-
@typing.overload
def fitEllipseDirect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@typing.overload
def fitEllipseDirect(points: UMat) -> cv2.typing.RotatedRect: ...
-
-
@typing.overload
def fitLine(
- points: cv2.typing.MatLike, distType: int, param: float, reps: float,
- aeps: float, line: cv2.typing.MatLike | None = ...,
+ points: cv2.typing.MatLike,
+ distType: int,
+ param: float,
+ reps: float,
+ aeps: float,
+ line: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def fitLine(points: UMat, distType: int, param: float, reps: float, aeps: float, line: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def flip(src: cv2.typing.MatLike, flipCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def flip(src: UMat, flipCode: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def flipND(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def flipND(src: UMat, axis: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def floodFill(
image: cv2.typing.MatLike,
@@ -9231,8 +7946,6 @@ def floodFill(
cv2.typing.MatLike,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def floodFill(
image: UMat,
@@ -9248,15 +7961,16 @@ def floodFill(
UMat,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def gemm(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, alpha: float, src3: cv2.typing.MatLike,
- beta: float, dst: cv2.typing.MatLike | None = ..., flags: int = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ alpha: float,
+ src3: cv2.typing.MatLike,
+ beta: float,
+ dst: cv2.typing.MatLike | None = ...,
+ flags: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def gemm(
src1: UMat,
@@ -9266,39 +7980,26 @@ def gemm(
beta: float,
dst: UMat | None = ...,
flags: int = ...,
-) -> UMat: ...
-
-
+) -> UMat: ...
@typing.overload
def getAffineTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@typing.overload
def getAffineTransform(src: UMat, dst: UMat) -> cv2.typing.MatLike: ...
-
-
def getBuildInformation() -> str: ...
-
-
def getCPUFeaturesLine() -> str: ...
-
-
def getCPUTickCount() -> int: ...
-
-
@typing.overload
def getDefaultNewCameraMatrix(
cameraMatrix: cv2.typing.MatLike,
imgsize: cv2.typing.Size = ...,
centerPrincipalPoint: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def getDefaultNewCameraMatrix(
- cameraMatrix: UMat, imgsize: cv2.typing.Size = ...,
+ cameraMatrix: UMat,
+ imgsize: cv2.typing.Size = ...,
centerPrincipalPoint: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def getDerivKernels(
dx: int,
@@ -9312,8 +8013,6 @@ def getDerivKernels(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def getDerivKernels(
dx: int,
@@ -9327,11 +8026,7 @@ def getDerivKernels(
UMat,
UMat,
]: ...
-
-
def getFontScaleFromHeight(fontFace: int, pixelHeight: int, thickness: int = ...) -> float: ...
-
-
def getGaborKernel(
ksize: cv2.typing.Size,
sigma: float,
@@ -9341,26 +8036,12 @@ def getGaborKernel(
psi: float = ...,
ktype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
def getGaussianKernel(ksize: int, sigma: float, ktype: int = ...) -> cv2.typing.MatLike: ...
-
-
def getHardwareFeatureName(feature: int) -> str: ...
-
-
def getLogLevel() -> int: ...
-
-
def getNumThreads() -> int: ...
-
-
def getNumberOfCPUs() -> int: ...
-
-
def getOptimalDFTSize(vecsize: int) -> int: ...
-
-
@typing.overload
def getOptimalNewCameraMatrix(
cameraMatrix: cv2.typing.MatLike,
@@ -9373,8 +8054,6 @@ def getOptimalNewCameraMatrix(
cv2.typing.MatLike,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def getOptimalNewCameraMatrix(
cameraMatrix: UMat,
@@ -9387,26 +8066,22 @@ def getOptimalNewCameraMatrix(
cv2.typing.MatLike,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def getPerspectiveTransform(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike,
solveMethod: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def getPerspectiveTransform(src: UMat, dst: UMat, solveMethod: int = ...) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def getRectSubPix(
- image: cv2.typing.MatLike, patchSize: cv2.typing.Size, center: cv2.typing.Point2f,
- patch: cv2.typing.MatLike | None = ..., patchType: int = ...,
+ image: cv2.typing.MatLike,
+ patchSize: cv2.typing.Size,
+ center: cv2.typing.Point2f,
+ patch: cv2.typing.MatLike | None = ...,
+ patchType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def getRectSubPix(
image: UMat,
@@ -9415,29 +8090,13 @@ def getRectSubPix(
patch: UMat | None = ...,
patchType: int = ...,
) -> UMat: ...
-
-
def getRotationMatrix2D(center: cv2.typing.Point2f, angle: float, scale: float) -> cv2.typing.MatLike: ...
-
-
def getStructuringElement(shape: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ...) -> cv2.typing.MatLike: ...
-
-
def getTextSize(text: str, fontFace: int, fontScale: float, thickness: int) -> tuple[cv2.typing.Size, int]: ...
-
-
def getThreadNum() -> int: ...
-
-
def getTickCount() -> int: ...
-
-
def getTickFrequency() -> float: ...
-
-
def getTrackbarPos(trackbarname: str, winname: str) -> int: ...
-
-
def getValidDisparityROI(
roi1: cv2.typing.Rect,
roi2: cv2.typing.Rect,
@@ -9445,26 +8104,12 @@ def getValidDisparityROI(
numberOfDisparities: int,
blockSize: int,
) -> cv2.typing.Rect: ...
-
-
def getVersionMajor() -> int: ...
-
-
def getVersionMinor() -> int: ...
-
-
def getVersionRevision() -> int: ...
-
-
def getVersionString() -> str: ...
-
-
def getWindowImageRect(winname: str) -> cv2.typing.Rect: ...
-
-
def getWindowProperty(winname: str, prop_id: int) -> float: ...
-
-
@typing.overload
def goodFeaturesToTrack(
image: cv2.typing.MatLike,
@@ -9477,8 +8122,6 @@ def goodFeaturesToTrack(
useHarrisDetector: bool = ...,
k: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def goodFeaturesToTrack(
image: UMat,
@@ -9491,8 +8134,6 @@ def goodFeaturesToTrack(
useHarrisDetector: bool = ...,
k: float = ...,
) -> UMat: ...
-
-
@typing.overload
def goodFeaturesToTrack(
image: cv2.typing.MatLike,
@@ -9506,8 +8147,6 @@ def goodFeaturesToTrack(
useHarrisDetector: bool = ...,
k: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def goodFeaturesToTrack(
image: UMat,
@@ -9521,8 +8160,6 @@ def goodFeaturesToTrack(
useHarrisDetector: bool = ...,
k: float = ...,
) -> UMat: ...
-
-
@typing.overload
def goodFeaturesToTrackWithQuality(
image: cv2.typing.MatLike,
@@ -9540,8 +8177,6 @@ def goodFeaturesToTrackWithQuality(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def goodFeaturesToTrackWithQuality(
image: UMat,
@@ -9559,8 +8194,6 @@ def goodFeaturesToTrackWithQuality(
UMat,
UMat,
]: ...
-
-
@typing.overload
def grabCut(
img: cv2.typing.MatLike,
@@ -9575,8 +8208,6 @@ def grabCut(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def grabCut(
img: UMat,
@@ -9591,8 +8222,6 @@ def grabCut(
UMat,
UMat,
]: ...
-
-
def groupRectangles(
rectList: typing.Sequence[cv2.typing.Rect],
groupThreshold: int,
@@ -9601,46 +8230,30 @@ def groupRectangles(
typing.Sequence[cv2.typing.Rect],
typing.Sequence[int],
]: ...
-
-
@typing.overload
def hasNonZero(src: cv2.typing.MatLike) -> bool: ...
@typing.overload
def hasNonZero(src: UMat) -> bool: ...
-
-
def haveImageReader(filename: str) -> bool: ...
-
-
def haveImageWriter(filename: str) -> bool: ...
-
-
def haveOpenVX() -> bool: ...
-
-
@typing.overload
def hconcat(src: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def hconcat(src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def idct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
@typing.overload
def idct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ...
-
-
@typing.overload
def idft(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- flags: int = ..., nonzeroRows: int = ...,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ flags: int = ...,
+ nonzeroRows: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def idft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ...
-
-
@typing.overload
def illuminationChange(
src: cv2.typing.MatLike,
@@ -9649,8 +8262,6 @@ def illuminationChange(
alpha: float = ...,
beta: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def illuminationChange(
src: UMat,
@@ -9659,17 +8270,11 @@ def illuminationChange(
alpha: float = ...,
beta: float = ...,
) -> UMat: ...
-
-
def imcount(filename: str, flags: int = ...) -> int: ...
-
-
@typing.overload
def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike: ...
@typing.overload
def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def imdecodemulti(
buf: cv2.typing.MatLike,
@@ -9679,8 +8284,6 @@ def imdecodemulti(
bool,
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def imdecodemulti(
buf: UMat,
@@ -9690,8 +8293,6 @@ def imdecodemulti(
bool,
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def imencode(
ext: str,
@@ -9704,8 +8305,6 @@ def imencode(
numpy.dtype[numpy.uint8],
],
]: ...
-
-
@typing.overload
def imencode(
ext: str,
@@ -9718,11 +8317,7 @@ def imencode(
numpy.dtype[numpy.uint8],
],
]: ...
-
-
def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def imreadmulti(
filename: str,
@@ -9732,8 +8327,6 @@ def imreadmulti(
bool,
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def imreadmulti(
filename: str,
@@ -9745,45 +8338,33 @@ def imreadmulti(
bool,
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def imshow(winname: str, mat: cv2.typing.MatLike) -> None: ...
@typing.overload
def imshow(winname: str, mat: cv2.cuda.GpuMat) -> None: ...
@typing.overload
def imshow(winname: str, mat: UMat) -> None: ...
-
-
@typing.overload
def imwrite(filename: str, img: cv2.typing.MatLike, params: typing.Sequence[int] = ...) -> bool: ...
@typing.overload
def imwrite(filename: str, img: UMat, params: typing.Sequence[int] = ...) -> bool: ...
-
-
@typing.overload
def imwritemulti(
filename: str,
img: typing.Sequence[cv2.typing.MatLike],
params: typing.Sequence[int] = ...,
) -> bool: ...
-
-
@typing.overload
def imwritemulti(filename: str, img: typing.Sequence[UMat], params: typing.Sequence[int] = ...) -> bool: ...
-
-
@typing.overload
def inRange(
- src: cv2.typing.MatLike, lowerb: cv2.typing.MatLike, upperb: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ lowerb: cv2.typing.MatLike,
+ upperb: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def inRange(src: UMat, lowerb: UMat, upperb: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def initCameraMatrix2D(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -9791,8 +8372,6 @@ def initCameraMatrix2D(
imageSize: cv2.typing.Size,
aspectRatio: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def initCameraMatrix2D(
objectPoints: typing.Sequence[UMat],
@@ -9800,8 +8379,6 @@ def initCameraMatrix2D(
imageSize: cv2.typing.Size,
aspectRatio: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def initInverseRectificationMap(
cameraMatrix: cv2.typing.MatLike,
@@ -9816,8 +8393,6 @@ def initInverseRectificationMap(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def initInverseRectificationMap(
cameraMatrix: UMat,
@@ -9832,8 +8407,6 @@ def initInverseRectificationMap(
UMat,
UMat,
]: ...
-
-
@typing.overload
def initUndistortRectifyMap(
cameraMatrix: cv2.typing.MatLike,
@@ -9848,8 +8421,6 @@ def initUndistortRectifyMap(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def initUndistortRectifyMap(
cameraMatrix: UMat,
@@ -9864,36 +8435,28 @@ def initUndistortRectifyMap(
UMat,
UMat,
]: ...
-
-
@typing.overload
def inpaint(
- src: cv2.typing.MatLike, inpaintMask: cv2.typing.MatLike, inpaintRadius: float,
- flags: int, dst: cv2.typing.MatLike | None = ...,
+ src: cv2.typing.MatLike,
+ inpaintMask: cv2.typing.MatLike,
+ inpaintRadius: float,
+ flags: int,
+ dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def inpaint(src: UMat, inpaintMask: UMat, inpaintRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def insertChannel(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, coi: int) -> cv2.typing.MatLike: ...
@typing.overload
def insertChannel(src: UMat, dst: UMat, coi: int) -> UMat: ...
-
-
@typing.overload
def integral(
- src: cv2.typing.MatLike, sum: cv2.typing.MatLike |
- None = ..., sdepth: int = ...,
+ src: cv2.typing.MatLike,
+ sum: cv2.typing.MatLike | None = ...,
+ sdepth: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def integral(src: UMat, sum: UMat | None = ..., sdepth: int = ...) -> UMat: ...
-
-
@typing.overload
def integral2(
src: cv2.typing.MatLike,
@@ -9905,8 +8468,6 @@ def integral2(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def integral2(
src: UMat,
@@ -9918,8 +8479,6 @@ def integral2(
UMat,
UMat,
]: ...
-
-
@typing.overload
def integral3(
src: cv2.typing.MatLike,
@@ -9933,8 +8492,6 @@ def integral3(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def integral3(
src: UMat,
@@ -9948,8 +8505,6 @@ def integral3(
UMat,
UMat,
]: ...
-
-
@typing.overload
def intersectConvexConvex(
p1: cv2.typing.MatLike,
@@ -9960,8 +8515,6 @@ def intersectConvexConvex(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def intersectConvexConvex(
p1: UMat,
@@ -9972,8 +8525,6 @@ def intersectConvexConvex(
float,
UMat,
]: ...
-
-
@typing.overload
def invert(
src: cv2.typing.MatLike,
@@ -9983,24 +8534,16 @@ def invert(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def invert(src: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[float, UMat]: ...
-
-
@typing.overload
def invertAffineTransform(M: cv2.typing.MatLike, iM: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def invertAffineTransform(M: UMat, iM: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def isContourConvex(contour: cv2.typing.MatLike) -> bool: ...
@typing.overload
def isContourConvex(contour: UMat) -> bool: ...
-
-
@typing.overload
def kmeans(
data: cv2.typing.MatLike,
@@ -10015,8 +8558,6 @@ def kmeans(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def kmeans(
data: UMat,
@@ -10031,8 +8572,6 @@ def kmeans(
UMat,
UMat,
]: ...
-
-
@typing.overload
def line(
img: cv2.typing.MatLike,
@@ -10043,8 +8582,6 @@ def line(
lineType: int = ...,
shift: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def line(
img: UMat,
@@ -10055,15 +8592,14 @@ def line(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
@typing.overload
def linearPolar(
- src: cv2.typing.MatLike, center: cv2.typing.Point2f, maxRadius: float,
- flags: int, dst: cv2.typing.MatLike | None = ...,
+ src: cv2.typing.MatLike,
+ center: cv2.typing.Point2f,
+ maxRadius: float,
+ flags: int,
+ dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def linearPolar(
src: UMat,
@@ -10072,36 +8608,28 @@ def linearPolar(
flags: int,
dst: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def log(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def log(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def logPolar(
- src: cv2.typing.MatLike, center: cv2.typing.Point2f, M: float, flags: int,
+ src: cv2.typing.MatLike,
+ center: cv2.typing.Point2f,
+ M: float,
+ flags: int,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def logPolar(src: UMat, center: cv2.typing.Point2f, M: float, flags: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def magnitude(
- x: cv2.typing.MatLike, y: cv2.typing.MatLike,
+ x: cv2.typing.MatLike,
+ y: cv2.typing.MatLike,
magnitude: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def magnitude(x: UMat, y: UMat, magnitude: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def matMulDeriv(
A: cv2.typing.MatLike,
@@ -10112,18 +8640,12 @@ def matMulDeriv(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def matMulDeriv(A: UMat, B: UMat, dABdA: UMat | None = ..., dABdB: UMat | None = ...) -> tuple[UMat, UMat]: ...
-
-
@typing.overload
def matchShapes(contour1: cv2.typing.MatLike, contour2: cv2.typing.MatLike, method: int, parameter: float) -> float: ...
@typing.overload
def matchShapes(contour1: UMat, contour2: UMat, method: int, parameter: float) -> float: ...
-
-
@typing.overload
def matchTemplate(
image: cv2.typing.MatLike,
@@ -10132,8 +8654,6 @@ def matchTemplate(
result: cv2.typing.MatLike | None = ...,
mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def matchTemplate(
image: UMat,
@@ -10142,25 +8662,18 @@ def matchTemplate(
result: UMat | None = ...,
mask: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def max(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def max(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def mean(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.Scalar: ...
@typing.overload
def mean(src: UMat, mask: UMat | None = ...) -> cv2.typing.Scalar: ...
-
-
@typing.overload
def meanShift(
probImage: cv2.typing.MatLike,
@@ -10170,8 +8683,6 @@ def meanShift(
int,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def meanShift(
probImage: UMat,
@@ -10180,9 +8691,7 @@ def meanShift(
) -> tuple[
int,
cv2.typing.Rect,
-]: ...
-
-
+]: ...
@typing.overload
def meanStdDev(
src: cv2.typing.MatLike,
@@ -10193,8 +8702,6 @@ def meanStdDev(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def meanStdDev(
src: UMat,
@@ -10205,43 +8712,30 @@ def meanStdDev(
UMat,
UMat,
]: ...
-
-
@typing.overload
def medianBlur(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def medianBlur(src: UMat, ksize: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def merge(mv: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def merge(mv: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def min(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def min(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def minAreaRect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@typing.overload
def minAreaRect(points: UMat) -> cv2.typing.RotatedRect: ...
-
-
@typing.overload
def minEnclosingCircle(points: cv2.typing.MatLike) -> tuple[cv2.typing.Point2f, float]: ...
@typing.overload
def minEnclosingCircle(points: UMat) -> tuple[cv2.typing.Point2f, float]: ...
-
-
@typing.overload
def minEnclosingTriangle(
points: cv2.typing.MatLike,
@@ -10250,12 +8744,8 @@ def minEnclosingTriangle(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def minEnclosingTriangle(points: UMat, triangle: UMat | None = ...) -> tuple[float, UMat]: ...
-
-
@typing.overload
def minMaxLoc(
src: cv2.typing.MatLike,
@@ -10266,33 +8756,24 @@ def minMaxLoc(
cv2.typing.Point,
cv2.typing.Point,
]: ...
-
-
@typing.overload
def minMaxLoc(src: UMat, mask: UMat | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ...
-
-
@typing.overload
def mixChannels(
src: typing.Sequence[cv2.typing.MatLike],
dst: typing.Sequence[cv2.typing.MatLike],
fromTo: typing.Sequence[int],
) -> typing.Sequence[cv2.typing.MatLike]: ...
-
-
@typing.overload
def mixChannels(
- src: typing.Sequence[UMat], dst: typing.Sequence[UMat],
+ src: typing.Sequence[UMat],
+ dst: typing.Sequence[UMat],
fromTo: typing.Sequence[int],
) -> typing.Sequence[UMat]: ...
-
-
@typing.overload
def moments(array: cv2.typing.MatLike, binaryImage: bool = ...) -> cv2.typing.Moments: ...
@typing.overload
def moments(array: UMat, binaryImage: bool = ...) -> cv2.typing.Moments: ...
-
-
@typing.overload
def morphologyEx(
src: cv2.typing.MatLike,
@@ -10304,8 +8785,6 @@ def morphologyEx(
borderType: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def morphologyEx(
src: UMat,
@@ -10317,11 +8796,7 @@ def morphologyEx(
borderType: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
def moveWindow(winname: str, x: int, y: int) -> None: ...
-
-
@typing.overload
def mulSpectrums(
a: cv2.typing.MatLike,
@@ -10330,12 +8805,8 @@ def mulSpectrums(
c: cv2.typing.MatLike | None = ...,
conjB: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def mulSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ...
-
-
@typing.overload
def mulTransposed(
src: cv2.typing.MatLike,
@@ -10345,8 +8816,6 @@ def mulTransposed(
scale: float = ...,
dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def mulTransposed(
src: UMat,
@@ -10356,28 +8825,21 @@ def mulTransposed(
scale: float = ...,
dtype: int = ...,
) -> UMat: ...
-
-
@typing.overload
def multiply(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike |
- None = ..., scale: float = ..., dtype: int = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ scale: float = ...,
+ dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def multiply(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ...
-
-
def namedWindow(winname: str, flags: int = ...) -> None: ...
-
-
@typing.overload
def norm(src1: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ...
@typing.overload
def norm(src1: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ...
-
-
@typing.overload
def norm(
src1: cv2.typing.MatLike,
@@ -10385,19 +8847,18 @@ def norm(
normType: int = ...,
mask: cv2.typing.MatLike | None = ...,
) -> float: ...
-
-
@typing.overload
def norm(src1: UMat, src2: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ...
-
-
@typing.overload
def normalize(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float = ..., beta: float = ...,
- norm_type: int = ..., dtype: int = ..., mask: cv2.typing.MatLike | None = ...,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike,
+ alpha: float = ...,
+ beta: float = ...,
+ norm_type: int = ...,
+ dtype: int = ...,
+ mask: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def normalize(
src: UMat,
@@ -10408,14 +8869,10 @@ def normalize(
dtype: int = ...,
mask: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def patchNaNs(a: cv2.typing.MatLike, val: float = ...) -> cv2.typing.MatLike: ...
@typing.overload
def patchNaNs(a: UMat, val: float = ...) -> UMat: ...
-
-
@typing.overload
def pencilSketch(
src: cv2.typing.MatLike,
@@ -10428,8 +8885,6 @@ def pencilSketch(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def pencilSketch(
src: UMat,
@@ -10442,30 +8897,23 @@ def pencilSketch(
UMat,
UMat,
]: ...
-
-
@typing.overload
def perspectiveTransform(
- src: cv2.typing.MatLike, m: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ m: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def perspectiveTransform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def phase(
- x: cv2.typing.MatLike, y: cv2.typing.MatLike, angle: cv2.typing.MatLike |
- None = ..., angleInDegrees: bool = ...,
+ x: cv2.typing.MatLike,
+ y: cv2.typing.MatLike,
+ angle: cv2.typing.MatLike | None = ...,
+ angleInDegrees: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def phase(x: UMat, y: UMat, angle: UMat | None = ..., angleInDegrees: bool = ...) -> UMat: ...
-
-
@typing.overload
def phaseCorrelate(
src1: cv2.typing.MatLike,
@@ -10475,18 +8923,12 @@ def phaseCorrelate(
cv2.typing.Point2d,
float,
]: ...
-
-
@typing.overload
def phaseCorrelate(src1: UMat, src2: UMat, window: UMat | None = ...) -> tuple[cv2.typing.Point2d, float]: ...
-
-
@typing.overload
def pointPolygonTest(contour: cv2.typing.MatLike, pt: cv2.typing.Point2f, measureDist: bool) -> float: ...
@typing.overload
def pointPolygonTest(contour: UMat, pt: cv2.typing.Point2f, measureDist: bool) -> float: ...
-
-
@typing.overload
def polarToCart(
magnitude: cv2.typing.MatLike,
@@ -10498,8 +8940,6 @@ def polarToCart(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def polarToCart(
magnitude: UMat,
@@ -10511,11 +8951,7 @@ def polarToCart(
UMat,
UMat,
]: ...
-
-
def pollKey() -> int: ...
-
-
@typing.overload
def polylines(
img: cv2.typing.MatLike,
@@ -10526,8 +8962,6 @@ def polylines(
lineType: int = ...,
shift: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def polylines(
img: UMat,
@@ -10538,25 +8972,19 @@ def polylines(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
@typing.overload
def pow(src: cv2.typing.MatLike, power: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def pow(src: UMat, power: float, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def preCornerDetect(
- src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike |
- None = ..., borderType: int = ...,
+ src: cv2.typing.MatLike,
+ ksize: int,
+ dst: cv2.typing.MatLike | None = ...,
+ borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def preCornerDetect(src: UMat, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ...
-
-
@typing.overload
def projectPoints(
objectPoints: cv2.typing.MatLike,
@@ -10571,8 +8999,6 @@ def projectPoints(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def projectPoints(
objectPoints: UMat,
@@ -10587,8 +9013,6 @@ def projectPoints(
UMat,
UMat,
]: ...
-
-
@typing.overload
def putText(
img: cv2.typing.MatLike,
@@ -10601,8 +9025,6 @@ def putText(
lineType: int = ...,
bottomLeftOrigin: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def putText(
img: UMat,
@@ -10615,8 +9037,6 @@ def putText(
lineType: int = ...,
bottomLeftOrigin: bool = ...,
) -> UMat: ...
-
-
@typing.overload
def pyrDown(
src: cv2.typing.MatLike,
@@ -10624,12 +9044,8 @@ def pyrDown(
dstsize: cv2.typing.Size = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def pyrDown(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ...
-
-
@typing.overload
def pyrMeanShiftFiltering(
src: cv2.typing.MatLike,
@@ -10639,8 +9055,6 @@ def pyrMeanShiftFiltering(
maxLevel: int = ...,
termcrit: cv2.typing.TermCriteria = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def pyrMeanShiftFiltering(
src: UMat,
@@ -10650,8 +9064,6 @@ def pyrMeanShiftFiltering(
maxLevel: int = ...,
termcrit: cv2.typing.TermCriteria = ...,
) -> UMat: ...
-
-
@typing.overload
def pyrUp(
src: cv2.typing.MatLike,
@@ -10659,33 +9071,21 @@ def pyrUp(
dstsize: cv2.typing.Size = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def pyrUp(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ...
-
-
@typing.overload
def randShuffle(dst: cv2.typing.MatLike, iterFactor: float = ...) -> cv2.typing.MatLike: ...
@typing.overload
def randShuffle(dst: UMat, iterFactor: float = ...) -> UMat: ...
-
-
@typing.overload
def randn(dst: cv2.typing.MatLike, mean: cv2.typing.MatLike, stddev: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@typing.overload
def randn(dst: UMat, mean: UMat, stddev: UMat) -> UMat: ...
-
-
@typing.overload
def randu(dst: cv2.typing.MatLike, low: cv2.typing.MatLike, high: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@typing.overload
def randu(dst: UMat, low: UMat, high: UMat) -> UMat: ...
-
-
def readOpticalFlow(path: str) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def recoverPose(
points1: cv2.typing.MatLike,
@@ -10708,8 +9108,6 @@ def recoverPose(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def recoverPose(
points1: UMat,
@@ -10732,8 +9130,6 @@ def recoverPose(
UMat,
UMat,
]: ...
-
-
@typing.overload
def recoverPose(
E: cv2.typing.MatLike,
@@ -10749,8 +9145,6 @@ def recoverPose(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def recoverPose(
E: UMat,
@@ -10766,8 +9160,6 @@ def recoverPose(
UMat,
UMat,
]: ...
-
-
@typing.overload
def recoverPose(
E: cv2.typing.MatLike,
@@ -10784,8 +9176,6 @@ def recoverPose(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def recoverPose(
E: UMat,
@@ -10802,8 +9192,6 @@ def recoverPose(
UMat,
UMat,
]: ...
-
-
@typing.overload
def recoverPose(
E: cv2.typing.MatLike,
@@ -10822,8 +9210,6 @@ def recoverPose(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def recoverPose(
E: UMat,
@@ -10842,8 +9228,6 @@ def recoverPose(
UMat,
UMat,
]: ...
-
-
@typing.overload
def rectangle(
img: cv2.typing.MatLike,
@@ -10854,8 +9238,6 @@ def rectangle(
lineType: int = ...,
shift: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def rectangle(
img: UMat,
@@ -10866,8 +9248,6 @@ def rectangle(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
@typing.overload
def rectangle(
img: cv2.typing.MatLike,
@@ -10877,8 +9257,6 @@ def rectangle(
lineType: int = ...,
shift: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def rectangle(
img: UMat,
@@ -10888,11 +9266,7 @@ def rectangle(
lineType: int = ...,
shift: int = ...,
) -> UMat: ...
-
-
def rectangleIntersectionArea(a: cv2.typing.Rect2d, b: cv2.typing.Rect2d) -> float: ...
-
-
@typing.overload
def rectify3Collinear(
cameraMatrix1: cv2.typing.MatLike,
@@ -10930,8 +9304,6 @@ def rectify3Collinear(
cv2.typing.Rect,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def rectify3Collinear(
cameraMatrix1: UMat,
@@ -10969,8 +9341,6 @@ def rectify3Collinear(
cv2.typing.Rect,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def reduce(
src: cv2.typing.MatLike,
@@ -10979,34 +9349,26 @@ def reduce(
dst: cv2.typing.MatLike | None = ...,
dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def reduce(src: UMat, dim: int, rtype: int, dst: UMat | None = ..., dtype: int = ...) -> UMat: ...
-
-
@typing.overload
def reduceArgMax(
- src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike |
- None = ..., lastIndex: bool = ...,
+ src: cv2.typing.MatLike,
+ axis: int,
+ dst: cv2.typing.MatLike | None = ...,
+ lastIndex: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def reduceArgMax(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ...
-
-
@typing.overload
def reduceArgMin(
- src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike |
- None = ..., lastIndex: bool = ...,
+ src: cv2.typing.MatLike,
+ axis: int,
+ dst: cv2.typing.MatLike | None = ...,
+ lastIndex: bool = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def reduceArgMin(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ...
-
-
@typing.overload
def remap(
src: cv2.typing.MatLike,
@@ -11017,21 +9379,20 @@ def remap(
borderMode: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def remap(
- src: UMat, map1: UMat, map2: UMat, interpolation: int, dst: UMat | None = ...,
- borderMode: int = ..., borderValue: cv2.typing.Scalar = ...,
+ src: UMat,
+ map1: UMat,
+ map2: UMat,
+ interpolation: int,
+ dst: UMat | None = ...,
+ borderMode: int = ...,
+ borderValue: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
@typing.overload
def repeat(src: cv2.typing.MatLike, ny: int, nx: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def repeat(src: UMat, ny: int, nx: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def reprojectImageTo3D(
disparity: cv2.typing.MatLike,
@@ -11040,8 +9401,6 @@ def reprojectImageTo3D(
handleMissingValues: bool = ...,
ddepth: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def reprojectImageTo3D(
disparity: UMat,
@@ -11050,15 +9409,15 @@ def reprojectImageTo3D(
handleMissingValues: bool = ...,
ddepth: int = ...,
) -> UMat: ...
-
-
@typing.overload
def resize(
- src: cv2.typing.MatLike, dsize: cv2.typing.Size | None, dst: cv2.typing.MatLike | None = ...,
- fx: float = ..., fy: float = ..., interpolation: int = ...,
+ src: cv2.typing.MatLike,
+ dsize: cv2.typing.Size | None,
+ dst: cv2.typing.MatLike | None = ...,
+ fx: float = ...,
+ fy: float = ...,
+ interpolation: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def resize(
src: UMat,
@@ -11068,20 +9427,14 @@ def resize(
fy: float = ...,
interpolation: int = ...,
) -> UMat: ...
-
-
@typing.overload
def resizeWindow(winname: str, width: int, height: int) -> None: ...
@typing.overload
def resizeWindow(winname: str, size: cv2.typing.Size) -> None: ...
-
-
@typing.overload
def rotate(src: cv2.typing.MatLike, rotateCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def rotate(src: UMat, rotateCode: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def rotatedRectangleIntersection(
rect1: cv2.typing.RotatedRect,
@@ -11091,8 +9444,6 @@ def rotatedRectangleIntersection(
int,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def rotatedRectangleIntersection(
rect1: cv2.typing.RotatedRect,
@@ -11102,25 +9453,19 @@ def rotatedRectangleIntersection(
int,
UMat,
]: ...
-
-
@typing.overload
def sampsonDistance(pt1: cv2.typing.MatLike, pt2: cv2.typing.MatLike, F: cv2.typing.MatLike) -> float: ...
@typing.overload
def sampsonDistance(pt1: UMat, pt2: UMat, F: UMat) -> float: ...
-
-
@typing.overload
def scaleAdd(
- src1: cv2.typing.MatLike, alpha: float, src2: cv2.typing.MatLike,
+ src1: cv2.typing.MatLike,
+ alpha: float,
+ src2: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def scaleAdd(src1: UMat, alpha: float, src2: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def seamlessClone(
src: cv2.typing.MatLike,
@@ -11130,22 +9475,23 @@ def seamlessClone(
flags: int,
blend: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def seamlessClone(
- src: UMat, dst: UMat, mask: UMat, p: cv2.typing.Point,
- flags: int, blend: UMat | None = ...,
+ src: UMat,
+ dst: UMat,
+ mask: UMat,
+ p: cv2.typing.Point,
+ flags: int,
+ blend: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def selectROI(
- windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ...,
- fromCenter: bool = ..., printNotice: bool = ...,
+ windowName: str,
+ img: cv2.typing.MatLike,
+ showCrosshair: bool = ...,
+ fromCenter: bool = ...,
+ printNotice: bool = ...,
) -> cv2.typing.Rect: ...
-
-
@typing.overload
def selectROI(
windowName: str,
@@ -11154,8 +9500,6 @@ def selectROI(
fromCenter: bool = ...,
printNotice: bool = ...,
) -> cv2.typing.Rect: ...
-
-
@typing.overload
def selectROI(
img: cv2.typing.MatLike,
@@ -11163,8 +9507,6 @@ def selectROI(
fromCenter: bool = ...,
printNotice: bool = ...,
) -> cv2.typing.Rect: ...
-
-
@typing.overload
def selectROI(
img: UMat,
@@ -11172,22 +9514,22 @@ def selectROI(
fromCenter: bool = ...,
printNotice: bool = ...,
) -> cv2.typing.Rect: ...
-
-
@typing.overload
def selectROIs(
- windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ...,
- fromCenter: bool = ..., printNotice: bool = ...,
+ windowName: str,
+ img: cv2.typing.MatLike,
+ showCrosshair: bool = ...,
+ fromCenter: bool = ...,
+ printNotice: bool = ...,
) -> typing.Sequence[cv2.typing.Rect]: ...
-
-
@typing.overload
def selectROIs(
- windowName: str, img: UMat, showCrosshair: bool = ..., fromCenter: bool = ...,
+ windowName: str,
+ img: UMat,
+ showCrosshair: bool = ...,
+ fromCenter: bool = ...,
printNotice: bool = ...,
) -> typing.Sequence[cv2.typing.Rect]: ...
-
-
@typing.overload
def sepFilter2D(
src: cv2.typing.MatLike,
@@ -11199,8 +9541,6 @@ def sepFilter2D(
delta: float = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def sepFilter2D(
src: UMat,
@@ -11212,44 +9552,20 @@ def sepFilter2D(
delta: float = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def setIdentity(mtx: cv2.typing.MatLike, s: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@typing.overload
def setIdentity(mtx: UMat, s: cv2.typing.Scalar = ...) -> UMat: ...
-
-
def setLogLevel(level: int) -> int: ...
-
-
def setNumThreads(nthreads: int) -> None: ...
-
-
def setRNGSeed(seed: int) -> None: ...
-
-
def setTrackbarMax(trackbarname: str, winname: str, maxval: int) -> None: ...
-
-
def setTrackbarMin(trackbarname: str, winname: str, minval: int) -> None: ...
-
-
def setTrackbarPos(trackbarname: str, winname: str, pos: int) -> None: ...
-
-
def setUseOpenVX(flag: bool) -> None: ...
-
-
def setUseOptimized(onoff: bool) -> None: ...
-
-
def setWindowProperty(winname: str, prop_id: int, prop_value: float) -> None: ...
-
-
def setWindowTitle(winname: str, title: str) -> None: ...
-
-
@typing.overload
def solve(
src1: cv2.typing.MatLike,
@@ -11260,12 +9576,8 @@ def solve(
bool,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solve(src1: UMat, src2: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ...
-
-
@typing.overload
def solveCubic(
coeffs: cv2.typing.MatLike,
@@ -11274,12 +9586,8 @@ def solveCubic(
int,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solveCubic(coeffs: UMat, roots: UMat | None = ...) -> tuple[int, UMat]: ...
-
-
@typing.overload
def solveLP(
Func: cv2.typing.MatLike,
@@ -11290,12 +9598,8 @@ def solveLP(
int,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solveLP(Func: UMat, Constr: UMat, constr_eps: float, z: UMat | None = ...) -> tuple[int, UMat]: ...
-
-
@typing.overload
def solveLP(
Func: cv2.typing.MatLike,
@@ -11305,12 +9609,8 @@ def solveLP(
int,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solveLP(Func: UMat, Constr: UMat, z: UMat | None = ...) -> tuple[int, UMat]: ...
-
-
@typing.overload
def solveP3P(
objectPoints: cv2.typing.MatLike,
@@ -11325,8 +9625,6 @@ def solveP3P(
typing.Sequence[cv2.typing.MatLike],
typing.Sequence[cv2.typing.MatLike],
]: ...
-
-
@typing.overload
def solveP3P(
objectPoints: UMat,
@@ -11341,8 +9639,6 @@ def solveP3P(
typing.Sequence[UMat],
typing.Sequence[UMat],
]: ...
-
-
@typing.overload
def solvePnP(
objectPoints: cv2.typing.MatLike,
@@ -11358,8 +9654,6 @@ def solvePnP(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePnP(
objectPoints: UMat,
@@ -11375,8 +9669,6 @@ def solvePnP(
UMat,
UMat,
]: ...
-
-
@typing.overload
def solvePnPGeneric(
objectPoints: cv2.typing.MatLike,
@@ -11396,8 +9688,6 @@ def solvePnPGeneric(
typing.Sequence[cv2.typing.MatLike],
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePnPGeneric(
objectPoints: UMat,
@@ -11417,8 +9707,6 @@ def solvePnPGeneric(
typing.Sequence[UMat],
UMat,
]: ...
-
-
@typing.overload
def solvePnPRansac(
objectPoints: cv2.typing.MatLike,
@@ -11439,8 +9727,6 @@ def solvePnPRansac(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePnPRansac(
objectPoints: UMat,
@@ -11461,8 +9747,6 @@ def solvePnPRansac(
UMat,
UMat,
]: ...
-
-
@typing.overload
def solvePnPRansac(
objectPoints: cv2.typing.MatLike,
@@ -11480,8 +9764,6 @@ def solvePnPRansac(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePnPRansac(
objectPoints: UMat,
@@ -11499,8 +9781,6 @@ def solvePnPRansac(
UMat,
UMat,
]: ...
-
-
@typing.overload
def solvePnPRefineLM(
objectPoints: cv2.typing.MatLike,
@@ -11514,8 +9794,6 @@ def solvePnPRefineLM(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePnPRefineLM(
objectPoints: UMat,
@@ -11529,8 +9807,6 @@ def solvePnPRefineLM(
UMat,
UMat,
]: ...
-
-
@typing.overload
def solvePnPRefineVVS(
objectPoints: cv2.typing.MatLike,
@@ -11545,8 +9821,6 @@ def solvePnPRefineVVS(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePnPRefineVVS(
objectPoints: UMat,
@@ -11561,8 +9835,6 @@ def solvePnPRefineVVS(
UMat,
UMat,
]: ...
-
-
@typing.overload
def solvePoly(
coeffs: cv2.typing.MatLike,
@@ -11572,24 +9844,16 @@ def solvePoly(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def solvePoly(coeffs: UMat, roots: UMat | None = ..., maxIters: int = ...) -> tuple[float, UMat]: ...
-
-
@typing.overload
def sort(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def sort(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def sortIdx(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def sortIdx(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def spatialGradient(
src: cv2.typing.MatLike,
@@ -11601,8 +9865,6 @@ def spatialGradient(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def spatialGradient(
src: UMat,
@@ -11614,19 +9876,13 @@ def spatialGradient(
UMat,
UMat,
]: ...
-
-
@typing.overload
def split(
- m: cv2.typing.MatLike, mv: typing.Sequence[cv2.typing.MatLike]
- | None = ...,
+ m: cv2.typing.MatLike,
+ mv: typing.Sequence[cv2.typing.MatLike] | None = ...,
) -> typing.Sequence[cv2.typing.MatLike]: ...
-
-
@typing.overload
def split(m: UMat, mv: typing.Sequence[UMat] | None = ...) -> typing.Sequence[UMat]: ...
-
-
@typing.overload
def sqrBoxFilter(
src: cv2.typing.MatLike,
@@ -11637,8 +9893,6 @@ def sqrBoxFilter(
normalize: bool = ...,
borderType: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def sqrBoxFilter(
src: UMat,
@@ -11649,28 +9903,19 @@ def sqrBoxFilter(
normalize: bool = ...,
borderType: int = ...,
) -> UMat: ...
-
-
@typing.overload
def sqrt(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def sqrt(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def stackBlur(
- src: cv2.typing.MatLike, ksize: cv2.typing.Size,
+ src: cv2.typing.MatLike,
+ ksize: cv2.typing.Size,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def stackBlur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ...
-
-
def startWindowThread() -> int: ...
-
-
@typing.overload
def stereoCalibrate(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -11698,8 +9943,6 @@ def stereoCalibrate(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def stereoCalibrate(
objectPoints: typing.Sequence[UMat],
@@ -11727,8 +9970,6 @@ def stereoCalibrate(
UMat,
UMat,
]: ...
-
-
@typing.overload
def stereoCalibrate(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -11758,8 +9999,6 @@ def stereoCalibrate(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def stereoCalibrate(
objectPoints: typing.Sequence[UMat],
@@ -11789,8 +10028,6 @@ def stereoCalibrate(
UMat,
UMat,
]: ...
-
-
@typing.overload
def stereoCalibrateExtended(
objectPoints: typing.Sequence[cv2.typing.MatLike],
@@ -11824,8 +10061,6 @@ def stereoCalibrateExtended(
typing.Sequence[cv2.typing.MatLike],
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def stereoCalibrateExtended(
objectPoints: typing.Sequence[UMat],
@@ -11859,8 +10094,6 @@ def stereoCalibrateExtended(
typing.Sequence[UMat],
UMat,
]: ...
-
-
@typing.overload
def stereoRectify(
cameraMatrix1: cv2.typing.MatLike,
@@ -11887,8 +10120,6 @@ def stereoRectify(
cv2.typing.Rect,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def stereoRectify(
cameraMatrix1: UMat,
@@ -11915,8 +10146,6 @@ def stereoRectify(
cv2.typing.Rect,
cv2.typing.Rect,
]: ...
-
-
@typing.overload
def stereoRectifyUncalibrated(
points1: cv2.typing.MatLike,
@@ -11931,8 +10160,6 @@ def stereoRectifyUncalibrated(
cv2.typing.MatLike,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def stereoRectifyUncalibrated(
points1: UMat,
@@ -11947,36 +10174,29 @@ def stereoRectifyUncalibrated(
UMat,
UMat,
]: ...
-
-
@typing.overload
def stylization(
- src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- sigma_s: float = ..., sigma_r: float = ...,
+ src: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ sigma_s: float = ...,
+ sigma_r: float = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def stylization(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ...
-
-
@typing.overload
def subtract(
- src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...,
- mask: cv2.typing.MatLike | None = ..., dtype: int = ...,
+ src1: cv2.typing.MatLike,
+ src2: cv2.typing.MatLike,
+ dst: cv2.typing.MatLike | None = ...,
+ mask: cv2.typing.MatLike | None = ...,
+ dtype: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def subtract(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ...
-
-
@typing.overload
def sumElems(src: cv2.typing.MatLike) -> cv2.typing.Scalar: ...
@typing.overload
def sumElems(src: UMat) -> cv2.typing.Scalar: ...
-
-
@typing.overload
def textureFlattening(
src: cv2.typing.MatLike,
@@ -11986,8 +10206,6 @@ def textureFlattening(
high_threshold: float = ...,
kernel_size: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def textureFlattening(
src: UMat,
@@ -11997,8 +10215,6 @@ def textureFlattening(
high_threshold: float = ...,
kernel_size: int = ...,
) -> UMat: ...
-
-
@typing.overload
def threshold(
src: cv2.typing.MatLike,
@@ -12010,47 +10226,32 @@ def threshold(
float,
cv2.typing.MatLike,
]: ...
-
-
@typing.overload
def threshold(src: UMat, thresh: float, maxval: float, type: int, dst: UMat | None = ...) -> tuple[float, UMat]: ...
-
-
@typing.overload
def trace(mtx: cv2.typing.MatLike) -> cv2.typing.Scalar: ...
@typing.overload
def trace(mtx: UMat) -> cv2.typing.Scalar: ...
-
-
@typing.overload
def transform(
- src: cv2.typing.MatLike, m: cv2.typing.MatLike,
+ src: cv2.typing.MatLike,
+ m: cv2.typing.MatLike,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def transform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def transpose(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def transpose(src: UMat, dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def transposeND(
src: cv2.typing.MatLike,
order: typing.Sequence[int],
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def transposeND(src: UMat, order: typing.Sequence[int], dst: UMat | None = ...) -> UMat: ...
-
-
@typing.overload
def triangulatePoints(
projMatr1: cv2.typing.MatLike,
@@ -12059,8 +10260,6 @@ def triangulatePoints(
projPoints2: cv2.typing.MatLike,
points4D: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def triangulatePoints(
projMatr1: UMat,
@@ -12069,8 +10268,6 @@ def triangulatePoints(
projPoints2: UMat,
points4D: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def undistort(
src: cv2.typing.MatLike,
@@ -12079,15 +10276,14 @@ def undistort(
dst: cv2.typing.MatLike | None = ...,
newCameraMatrix: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def undistort(
- src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat |
- None = ..., newCameraMatrix: UMat | None = ...,
+ src: UMat,
+ cameraMatrix: UMat,
+ distCoeffs: UMat,
+ dst: UMat | None = ...,
+ newCameraMatrix: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def undistortImagePoints(
src: cv2.typing.MatLike,
@@ -12096,15 +10292,14 @@ def undistortImagePoints(
dst: cv2.typing.MatLike | None = ...,
arg1: cv2.typing.TermCriteria = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def undistortImagePoints(
- src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat |
- None = ..., arg1: cv2.typing.TermCriteria = ...,
+ src: UMat,
+ cameraMatrix: UMat,
+ distCoeffs: UMat,
+ dst: UMat | None = ...,
+ arg1: cv2.typing.TermCriteria = ...,
) -> UMat: ...
-
-
@typing.overload
def undistortPoints(
src: cv2.typing.MatLike,
@@ -12114,8 +10309,6 @@ def undistortPoints(
R: cv2.typing.MatLike | None = ...,
P: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def undistortPoints(
src: UMat,
@@ -12125,8 +10318,6 @@ def undistortPoints(
R: UMat | None = ...,
P: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def undistortPointsIter(
src: cv2.typing.MatLike,
@@ -12137,28 +10328,26 @@ def undistortPointsIter(
criteria: cv2.typing.TermCriteria,
dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def undistortPointsIter(
- src: UMat, cameraMatrix: UMat, distCoeffs: UMat, R: UMat, P: UMat,
- criteria: cv2.typing.TermCriteria, dst: UMat | None = ...,
+ src: UMat,
+ cameraMatrix: UMat,
+ distCoeffs: UMat,
+ R: UMat,
+ P: UMat,
+ criteria: cv2.typing.TermCriteria,
+ dst: UMat | None = ...,
) -> UMat: ...
-
-
def useOpenVX() -> bool: ...
-
-
def useOptimized() -> bool: ...
-
-
@typing.overload
def validateDisparity(
- disparity: cv2.typing.MatLike, cost: cv2.typing.MatLike, minDisparity: int,
- numberOfDisparities: int, disp12MaxDisp: int = ...,
+ disparity: cv2.typing.MatLike,
+ cost: cv2.typing.MatLike,
+ minDisparity: int,
+ numberOfDisparities: int,
+ disp12MaxDisp: int = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def validateDisparity(
disparity: UMat,
@@ -12167,20 +10356,12 @@ def validateDisparity(
numberOfDisparities: int,
disp12MaxDisp: int = ...,
) -> UMat: ...
-
-
@typing.overload
def vconcat(src: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@typing.overload
def vconcat(src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
-
-
def waitKey(delay: int = ...) -> int: ...
-
-
def waitKeyEx(delay: int = ...) -> int: ...
-
-
@typing.overload
def warpAffine(
src: cv2.typing.MatLike,
@@ -12191,8 +10372,6 @@ def warpAffine(
borderMode: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def warpAffine(
src: UMat,
@@ -12203,8 +10382,6 @@ def warpAffine(
borderMode: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
@typing.overload
def warpPerspective(
src: cv2.typing.MatLike,
@@ -12215,8 +10392,6 @@ def warpPerspective(
borderMode: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def warpPerspective(
src: UMat,
@@ -12227,28 +10402,28 @@ def warpPerspective(
borderMode: int = ...,
borderValue: cv2.typing.Scalar = ...,
) -> UMat: ...
-
-
@typing.overload
def warpPolar(
- src: cv2.typing.MatLike, dsize: cv2.typing.Size, center: cv2.typing.Point2f,
- maxRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...,
+ src: cv2.typing.MatLike,
+ dsize: cv2.typing.Size,
+ center: cv2.typing.Point2f,
+ maxRadius: float,
+ flags: int,
+ dst: cv2.typing.MatLike | None = ...,
) -> cv2.typing.MatLike: ...
-
-
@typing.overload
def warpPolar(
- src: UMat, dsize: cv2.typing.Size, center: cv2.typing.Point2f,
- maxRadius: float, flags: int, dst: UMat | None = ...,
+ src: UMat,
+ dsize: cv2.typing.Size,
+ center: cv2.typing.Point2f,
+ maxRadius: float,
+ flags: int,
+ dst: UMat | None = ...,
) -> UMat: ...
-
-
@typing.overload
def watershed(image: cv2.typing.MatLike, markers: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@typing.overload
def watershed(image: UMat, markers: UMat) -> UMat: ...
-
-
@typing.overload
def writeOpticalFlow(path: str, flow: cv2.typing.MatLike) -> bool: ...
@typing.overload
diff --git a/typings/cv2/mat_wrapper/__init__.pyi b/typings/cv2/mat_wrapper/__init__.pyi
index db0f36e1..70572643 100644
--- a/typings/cv2/mat_wrapper/__init__.pyi
+++ b/typings/cv2/mat_wrapper/__init__.pyi
@@ -1,6 +1,7 @@
+from typing import TypeAlias
+
import numpy as np
from _typeshed import Unused
-from typing_extensions import TypeAlias
__all__: list[str] = []
_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]]
diff --git a/typings/multiprocessing/connection.pyi b/typings/multiprocessing/connection.pyi
index 1cc33506..3f21858c 100644
--- a/typings/multiprocessing/connection.pyi
+++ b/typings/multiprocessing/connection.pyi
@@ -1,10 +1,9 @@
# https://github.com/python/typeshed/blob/main/stdlib/multiprocessing/connection.pyi
import sys
from types import TracebackType
-from typing import Any, Generic, SupportsIndex, TypeVar
+from typing import Any, Generic, Self, SupportsIndex, TypeVar
from _typeshed import ReadableBuffer
-from typing_extensions import Self
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
@@ -29,7 +28,10 @@ class _ConnectionBase(Generic[_T1, _T2]):
def __enter__(self) -> Self: ...
def __exit__(
- self, exc_type: type[BaseException] | None, exc_value: BaseException | None, exc_tb: TracebackType | None,
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ exc_tb: TracebackType | None,
) -> None: ...
diff --git a/typings/multiprocessing/test_cases/check_pipe_connections.py b/typings/multiprocessing/test_cases/check_pipe_connections.py
index eee9476b..a5b1cd2a 100644
--- a/typings/multiprocessing/test_cases/check_pipe_connections.py
+++ b/typings/multiprocessing/test_cases/check_pipe_connections.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from multiprocessing.connection import Pipe, PipeConnection
# Less type-safe, but no extra variable. User could mix up send and recv types.
@@ -10,7 +8,8 @@
# More type safe, but extra variable
connections_wrong: tuple[
- PipeConnection[str, int], PipeConnection[str, int],
+ PipeConnection[str, int],
+ PipeConnection[str, int],
] = Pipe() # pyright: ignore[reportGeneralTypeIssues]
connections_ok: tuple[PipeConnection[str, int], PipeConnection[int, str]] = Pipe()
a, b = connections_ok