diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 5f3a2b988..f68442fae 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -18,6 +18,10 @@
"onAutoForward": "silent"
}
},
+ "mounts": [
+ "source=${localWorkspaceFolder}/config,target=/config,type=bind,consistency=cached"
+ ],
+ "userEnvProbe": "loginInteractiveShell",
"runArgs": ["-e", "GIT_EDITOR=code --wait"],
"customizations": {
"vscode": {
@@ -26,6 +30,9 @@
"ms-python.vscode-pylance",
"ms-python.isort",
"ms-python.flake8",
+ "ms-python.black-formatter",
+ "ms-python.pylint",
+ "ms-python.mypy-type-checker",
"redhat.vscode-yaml",
"esbenp.prettier-vscode",
"GitHub.vscode-pull-request-github",
@@ -34,29 +41,26 @@
"eamodio.gitlens",
"dbaeumer.vscode-eslint",
"ms-azuretools.vscode-docker",
- "unifiedjs.vscode-mdx"
+ "unifiedjs.vscode-mdx",
+ "ckolkman.vscode-postgres"
],
"settings": {
"python.pythonPath": "/usr/local/bin/python",
- "python.linting.enabled": true,
- "python.linting.pylintEnabled": true,
- "python.linting.pylintPath": "/usr/local/bin/pylint",
- "python.linting.flake8Enabled": true,
- "python.linting.flake8Path": "/usr/local/bin/flake8",
- "python.linting.pycodestylePath": "/usr/local/bin/pycodestyle",
- "python.linting.pydocstylePath": "/usr/local/bin/pydocstyle",
- "python.formatting.blackPath": "/usr/local/bin/black",
- "python.formatting.provider": "black",
"python.testing.pytestArgs": ["--no-cov"],
- "python.linting.mypyEnabled": true,
- "python.linting.mypyPath": "/usr/local/bin/mypy",
+ "isort.check": true,
"editor.rulers": [80],
"[python]": {
+ "editor.defaultFormatter": "ms-python.black-formatter",
"editor.rulers": [88],
"editor.codeActionsOnSave": {
- "source.organizeImports": true
+ "source.organizeImports": "explicit"
}
},
+ "mypy-type-checker.importStrategy": "fromEnvironment",
+ "pylint.importStrategy": "fromEnvironment",
+ "black-formatter.importStrategy": "fromEnvironment",
+ "flake8.importStrategy": "fromEnvironment",
+ "isort.importStrategy": "fromEnvironment",
"editor.formatOnPaste": false,
"editor.formatOnSave": true,
"editor.formatOnType": true,
diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh
index d15a03b39..61941579c 100755
--- a/.devcontainer/setup.sh
+++ b/.devcontainer/setup.sh
@@ -25,7 +25,7 @@ do
sed -i "s/$FILE=true/$FILE=false/g" $HOME/.bashrc
done
-# Symlink config
+# Create default config if it is missing
cd $WORKSPACE_DIR
mkdir -p $WORKSPACE_DIR/config
FILE=$WORKSPACE_DIR/config/config.yaml
@@ -35,7 +35,15 @@ else
echo "Creating default config"
python3 -c "import viseron.config; viseron.config.create_default_config('$FILE')"
fi
-ln -s $WORKSPACE_DIR/config/config.yaml /config/config.yaml
+
+# Create symlink to config file
+FILE=/config/config.yaml
+if test -f "$FILE"; then
+ echo "Config symlink already exists"
+else
+ echo "Creating config symlink"
+ ln -s $WORKSPACE_DIR/config/config.yaml /config/config.yaml
+fi
# Create .env.local
FILE=$WORKSPACE_DIR/frontend/.env.local
@@ -45,3 +53,6 @@ else
echo "Creating frontend .env.local"
echo "VITE_PROXY_HOST=localhost:8888" > $FILE
fi
+
+# Generate locale
+locale-gen
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 63c7cf3f8..bacfccce4 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -49,7 +49,7 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends libgirepository1.0-dev
- python3 -m venv venv
+ python3 -m venv --system-site-packages venv
- name: Install requirements into Python virtual environment
if: steps.cache-venv.outputs.cache-hit != 'true'
uses: ./.github/templates/run_in_venv
@@ -422,7 +422,11 @@ jobs:
docker-compose --file azure-pipelines/docker-compose-build.yaml --env-file azure-pipelines/.env build --build-arg BUILDKIT_INLINE_CACHE=1 amd64-viseron-tests
- name: Run pytest
run: |
- docker-compose --file azure-pipelines/docker-compose-build.yaml --env-file azure-pipelines/.env up amd64-viseron-tests
+ if docker-compose --file azure-pipelines/docker-compose-build.yaml --env-file azure-pipelines/.env up amd64-viseron-tests; then
+ exit 0
+ else
+ exit 1
+ fi
- name: Copy .coverage to host
run: |
docker cp amd64-viseron-tests:/src/coverage.xml coverage.xml
diff --git a/.gitignore b/.gitignore
index 22fc1f232..f5b9a1759 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,6 +52,8 @@
!/tests/*
!/tests/components
!/tests/components/**
+!/tests/domains
+!/tests/domains/**
!/tests/helpers
!/tests/helpers/*
@@ -81,7 +83,7 @@
!/frontend/src/**
!/frontend/tests
!/frontend/tests/**
-!/frontend/.eslintrc.js
+!/frontend/.eslintrc.cjs
!/frontend/.prettierrc.json
!/frontend/404.html
!/frontend/index.html
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 85f38cd44..9b4954ada 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,9 @@ repos:
rev: v3.2.2
hooks:
- id: pyupgrade
- args: [--py38-plus]
+ args:
+ - --py38-plus
+ - --keep-runtime-typing
files: ^(viseron|tests)/.+\.py$
- repo: https://github.com/PyCQA/autoflake
rev: v2.0.0
@@ -88,4 +90,4 @@ repos:
if [ -n "$test" ]; then echo "Found null description in config.json:"; echo "$test"; false; fi
'
pass_filenames: false
- always_run: true
\ No newline at end of file
+ always_run: true
diff --git a/.pylintrc b/.pylintrc
index fd00420af..fb59306ba 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -82,7 +82,8 @@ disable=format,
too-many-statements,
too-many-boolean-expressions,
consider-using-with,
- consider-using-f-string
+ consider-using-f-string,
+ consider-using-dict-items
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
@@ -191,7 +192,7 @@ contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=cv2.*
+generated-members=cv2.*,alembic.*
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
@@ -214,7 +215,7 @@ ignored-classes=optparse.Values,thread._local,_thread._local
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
+ignored-modules=alembic.context,alembic.op
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
@@ -309,6 +310,8 @@ good-names=i,
x2,
y1,
y2,
+ mb,
+ gb,
Run,
_,
T
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index ef081767c..37b434dd3 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -1,14 +1,18 @@
{
- "recommendations": [
- "ms-python.python",
- "ms-python.vscode-pylance",
- "ms-python.isort",
- "ms-python.flake8",
- "redhat.vscode-yaml",
- "esbenp.prettier-vscode",
- "eamodio.gitlens",
- "dbaeumer.vscode-eslint",
- "ms-azuretools.vscode-docker",
- "unifiedjs.vscode-mdx"
- ]
- }
\ No newline at end of file
+ "recommendations": [
+ "ms-python.python",
+ "ms-python.vscode-pylance",
+ "ms-python.isort",
+ "ms-python.flake8",
+ "ms-python.black-formatter",
+ "ms-python.pylint",
+ "ms-python.mypy-type-checker",
+ "redhat.vscode-yaml",
+ "esbenp.prettier-vscode",
+ "eamodio.gitlens",
+ "dbaeumer.vscode-eslint",
+ "ms-azuretools.vscode-docker",
+ "unifiedjs.vscode-mdx",
+ "ckolkman.vscode-postgres"
+ ]
+}
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 93cdd5346..22ea68003 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -2,5 +2,15 @@
"github-actions.workflows.pinned.workflows": [".github/workflows/ci.yaml"],
"python.testing.pytestArgs": ["tests/"],
"python.testing.unittestEnabled": false,
- "python.testing.pytestEnabled": true
+ "python.testing.pytestEnabled": true,
+ "pylint.severity": {
+ "convention": "Warning"
+ },
+ "[python]": {
+ "editor.rulers": [88],
+ "editor.codeActionsOnSave": {
+ "source.organizeImports": "explicit"
+ },
+ "editor.defaultFormatter": "ms-python.black-formatter"
+ }
}
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
index 0cc32947a..79ebcb4f6 100644
--- a/.vscode/tasks.json
+++ b/.vscode/tasks.json
@@ -8,7 +8,7 @@
"group": "test",
"presentation": {
"reveal": "always",
- "panel": "new"
+ "panel": "dedicated"
},
"problemMatcher": []
},
@@ -92,6 +92,23 @@
},
"problemMatcher": []
},
+ {
+ "label": "Python: Generate database migration",
+ "detail": "Generates a new revision file for the database.",
+ "type": "shell",
+ "command": "alembic revision --autogenerate -m '${input:revision_message}'",
+ "options": {
+ "cwd": "${workspaceFolder}/viseron/components/storage"
+ },
+ "group": {
+ "kind": "build"
+ },
+ "presentation": {
+ "reveal": "always",
+ "panel": "new"
+ },
+ "problemMatcher": []
+ },
// Frontend tasks
{
"label": "Frontend: Build",
@@ -274,5 +291,9 @@
"type": "promptString",
"description": "The component to generate docs for",
},
+ {
+ "id": "revision_message",
+ "type": "promptString",
+ "description": "Revision message (should end with a period)",}
]
}
diff --git a/azure-pipelines/docker-compose-build.yaml b/azure-pipelines/docker-compose-build.yaml
index a9870e25f..a516bc125 100644
--- a/azure-pipelines/docker-compose-build.yaml
+++ b/azure-pipelines/docker-compose-build.yaml
@@ -120,6 +120,7 @@ services:
WHEELS_VERSION: "$WHEELS_VERSION"
S6_OVERLAY_ARCH: amd64
S6_OVERLAY_VERSION: "$S6_OVERLAY_VERSION"
+ UBUNTU_VERSION: "$UBUNTU_VERSION"
cache_from:
- roflcoopter/amd64-viseron:dev
image: roflcoopter/amd64-viseron:dev
@@ -133,14 +134,20 @@ services:
VISERON_VERSION: dev
image: roflcoopter/amd64-viseron-tests:dev
container_name: amd64-viseron-tests
- entrypoint: ""
+ environment:
+ - PGID=1000
+ - PUID=1000
command: >
- bash -c "pytest --cov=viseron/ --cov-report term-missing -s tests
- && coverage xml"
-
+ bash -c "
+ chown -R abc:abc /src &&
+ su abc -c '
+ cd /src &&
+ pytest --cov=viseron/ --cov-report term-missing -s tests &&
+ coverage xml
+ '
+ "
amd64-viseron-tests-tox:
<<: *amd64-viseron-tests
- entrypoint: pytest
amd64-viseron-vulture:
<<: *amd64-viseron-tests
@@ -247,6 +254,7 @@ services:
WHEELS_VERSION: "$WHEELS_VERSION"
S6_OVERLAY_ARCH: amd64
S6_OVERLAY_VERSION: "$S6_OVERLAY_VERSION"
+ UBUNTU_VERSION: "$UBUNTU_VERSION"
cache_from:
- roflcoopter/amd64-cuda-viseron:dev
image: roflcoopter/amd64-cuda-viseron:dev
@@ -343,6 +351,7 @@ services:
WHEELS_VERSION: "$WHEELS_VERSION"
S6_OVERLAY_ARCH: armhf
S6_OVERLAY_VERSION: "$S6_OVERLAY_VERSION"
+ UBUNTU_VERSION: "$UBUNTU_VERSION"
cache_from:
- roflcoopter/rpi3-viseron:dev
image: roflcoopter/rpi3-viseron:dev
@@ -430,6 +439,7 @@ services:
WHEELS_VERSION: "$WHEELS_VERSION"
S6_OVERLAY_ARCH: aarch64
S6_OVERLAY_VERSION: "$S6_OVERLAY_VERSION"
+ UBUNTU_VERSION: "$UBUNTU_VERSION"
cache_from:
- roflcoopter/aarch64-viseron:dev
image: roflcoopter/aarch64-viseron:dev
@@ -568,6 +578,7 @@ services:
WHEELS_VERSION: "$WHEELS_VERSION"
S6_OVERLAY_ARCH: aarch64
S6_OVERLAY_VERSION: "$S6_OVERLAY_VERSION"
+ UBUNTU_VERSION: "$UBUNTU_VERSION"
cache_from:
- roflcoopter/jetson-nano-viseron:dev
image: roflcoopter/jetson-nano-viseron:dev
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 79711b060..5d1425678 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -3,19 +3,52 @@ ARG BASE_VERSION
ARG OPENCV_VERSION
ARG FFMPEG_VERSION
ARG WHEELS_VERSION
+ARG UBUNTU_VERSION
FROM roflcoopter/${ARCH}-opencv:${OPENCV_VERSION} as opencv
FROM roflcoopter/${ARCH}-ffmpeg:${FFMPEG_VERSION} as ffmpeg
FROM roflcoopter/${ARCH}-wheels:${WHEELS_VERSION} as wheels
-FROM node:14.18.3 as frontend
+
+# Build GPAC
+FROM ubuntu:${UBUNTU_VERSION} AS gpac
+
+ENV \
+ DEBIAN_FRONTEND=noninteractive
+
+RUN \
+ apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ ca-certificates \
+ pkg-config \
+ g++ \
+ git \
+ cmake \
+ yasm \
+ zlib1g-dev
+
+RUN \
+ git clone https://github.com/gpac/gpac.git
+RUN \
+ cd gpac && \
+ ./configure --static-bin / && \
+ make -j$(nproc)
+
+
+# Build frontend
+FROM node:20.10.0 as frontend
WORKDIR /frontend
COPY frontend/package.json ./
COPY frontend/package-lock.json ./
+# npm 10+ fails to install in Docker for some reason
+RUN npm install -g npm@9
+RUN npm ci --legacy-peer-deps
+
COPY frontend/ ./
-RUN npm ci && npm run build
+RUN npm run build
+# Build Viseron
FROM roflcoopter/${ARCH}-base:${BASE_VERSION}
WORKDIR /src
@@ -29,12 +62,14 @@ ARG EXTRA_APT_PACKAGES
ENV \
DEBIAN_FRONTEND=noninteractive \
S6_KEEP_ENV=1 \
- S6_SERVICES_GRACETIME=20000 \
+ S6_SERVICES_GRACETIME=30000 \
S6_KILL_GRACETIME=1000 \
PATH=$PATH:/home/abc/bin \
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib \
PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.8/site-packages \
- OPENCV_OPENCL_CACHE_ENABLE=false
+ OPENCV_OPENCL_CACHE_ENABLE=false \
+ PGDATA=/config/postgresql \
+ PG_COLOR="always"
ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}-installer /tmp/s6-overlay-installer
@@ -50,6 +85,9 @@ RUN \
libgomp1 \
# dlib Dependencies
libopenblas-base \
+ # PostgreSQL
+ postgresql \
+ postgresql-contrib \
# Install extra apt packages from build-arg
${EXTRA_APT_PACKAGES} \
# GStreamer
@@ -91,6 +129,9 @@ RUN \
VOLUME /config
VOLUME /recordings
+VOLUME /segments
+VOLUME /snapshots
+VOLUME /thumbnails
ENTRYPOINT ["/init"]
@@ -98,6 +139,7 @@ COPY docker/ffprobe_wrapper /home/abc/bin/ffprobe
COPY docker/ffmpeg_wrapper /home/abc/bin/ffmpeg
COPY --from=opencv /opt/opencv /usr/local/
COPY --from=ffmpeg /usr/local /ffmpeg/
+COPY --from=gpac /gpac/bin/gcc /usr/bin
COPY rootfs/ /
COPY --from=frontend /frontend/dist /src/viseron/frontend/
COPY viseron /src/viseron/
diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
index 09a2832d8..6c72e6bfa 100644
--- a/docker/Dockerfile.dev
+++ b/docker/Dockerfile.dev
@@ -5,9 +5,13 @@ ENV SHELL /bin/bash
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
curl \
- && curl -fsSL https://deb.nodesource.com/setup_16.x | bash - \
+ locales \
+ && curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
&& apt-get install --no-install-recommends -y \
- nodejs
+ nodejs \
+ && npm install -g npm@latest \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
COPY requirements_test.txt requirements_test.txt
RUN pip3 install -r requirements_test.txt \
@@ -16,4 +20,10 @@ RUN pip3 install -r requirements_test.txt \
RUN rm -r /etc/services.d/viseron
-WORKDIR /workspaces/viseron
+# Ensure at least the en_US.UTF-8 UTF-8 locale is available.
+RUN echo "LC_ALL=en_US.UTF-8" >> /etc/environment \
+ && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen \
+ && echo "LANG=en_US.UTF-8" > /etc/locale.conf \
+ && locale-gen en_US.UTF-8
+
+WORKDIR /workspaces/viseron
\ No newline at end of file
diff --git a/docker/Dockerfile.tests b/docker/Dockerfile.tests
index 8c9a330fb..450c4ab23 100644
--- a/docker/Dockerfile.tests
+++ b/docker/Dockerfile.tests
@@ -10,6 +10,8 @@ ADD requirements_test.txt requirements_test.txt
RUN \
pip3 install -r requirements_test.txt
+RUN rm -r /etc/services.d/viseron
+
COPY .coveragerc /src/
COPY viseron /src/viseron/
COPY tests /src/tests/
diff --git a/docs/docs/documentation/installation.mdx b/docs/docs/documentation/installation.mdx
index c05192627..99634fa2a 100644
--- a/docs/docs/documentation/installation.mdx
+++ b/docs/docs/documentation/installation.mdx
@@ -43,7 +43,8 @@ You have to change the values between the brackets `{}` to match your setup.
:::
-64-bit Linux machine
+64-bit Linux machine
64-bit Linux machine with VAAPI (Intel NUC for example)
+64-bit Linux machine with VAAPI (Intel NUC for example)
RaspberryPi 3b+
+RaspberryPi 3b+
diff --git a/docs/src/pages/components-explorer/_components/ComponentConfiguration/index.tsx b/docs/src/pages/components-explorer/_components/ComponentConfiguration/index.tsx
${description}`;
+ }
return (
     <code>{item.default.toString()}</code>
   );
}
+
if (item.optional) {
return ")";
}
@@ -180,7 +213,11 @@ function buildHeader(item: any) {
[styles.true]: !optional,
})}
>
- {optional ? "optional" : " required"}
+ {optional
+ ? "optional"
+ : item.deprecated
+ ? " deprecated"
+ : " required"}
{getDefault(item)}
diff --git a/docs/src/pages/components-explorer/_components/ComponentTroubleshooting/index.tsx b/docs/src/pages/components-explorer/_components/ComponentTroubleshooting/index.tsx
new file mode 100644
index 000000000..82b6e6277
--- /dev/null
+++ b/docs/src/pages/components-explorer/_components/ComponentTroubleshooting/index.tsx
@@ -0,0 +1,29 @@
+import React from "react";
+
+import CodeBlock from "@theme/CodeBlock";
+
+import { Component } from "@site/src/types";
+
+function ComponentTroubleshooting({
+ meta,
+ logs,
+}: {
+ meta: Component;
+ logs?: string[] | undefined;
+}) {
+  return (
+    <>
+      To enable debug logging for <code>{meta.name}</code>, add the following to
+      your <code>config.yaml</code>:
+      <CodeBlock language="yaml">
+        {`logger:
+  logs:
+${(logs || [meta.name]).map((log) => `    ${log}: debug`).join("\n")}`}
+      </CodeBlock>
+    </>
+  );
+}
+
+export default ComponentTroubleshooting;
diff --git a/docs/src/pages/components-explorer/components/codeprojectai/config.json b/docs/src/pages/components-explorer/components/codeprojectai/config.json
+    {
+      "type": "boolean",
+      "name": "store",
+      "description": "If set to <code>true</code>, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with <code>trigger_recorder</code> set to <code>true</code> will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
@@ -278,6 +292,20 @@
"optional": true,
"default": true
},
+ {
+ "type": "boolean",
+ "name": "store",
+ "description": "If set to true
, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with trigger_recorder
set to true
will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
diff --git a/docs/src/pages/components-explorer/components/codeprojectai/index.mdx b/docs/src/pages/components-explorer/components/codeprojectai/index.mdx
index ff016996f..828f7c2c0 100644
--- a/docs/src/pages/components-explorer/components/codeprojectai/index.mdx
+++ b/docs/src/pages/components-explorer/components/codeprojectai/index.mdx
@@ -1,5 +1,6 @@
import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
import FaceRecognition from "@site/src/pages/components-explorer/_domains/face_recognition/index.mdx";
import FaceRecognitionLabels from "@site/src/pages/components-explorer/_domains/face_recognition/labels.mdx";
import FaceRecognitionTrain from "@site/src/pages/components-explorer/_domains/face_recognition/train.mdx";
@@ -126,3 +127,7 @@ codeprojectai:
### Mask
+
+## Troubleshooting
+
+<ComponentTroubleshooting meta={ComponentMetadata} />
diff --git a/docs/src/pages/components-explorer/components/darknet/config.json b/docs/src/pages/components-explorer/components/darknet/config.json
+    {
+      "type": "boolean",
+      "name": "store",
+      "description": "If set to <code>true</code>, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with <code>trigger_recorder</code> set to <code>true</code> will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
@@ -255,6 +269,20 @@
"optional": true,
"default": true
},
+ {
+ "type": "boolean",
+ "name": "store",
+ "description": "If set to true
, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with trigger_recorder
set to true
will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
diff --git a/docs/src/pages/components-explorer/components/darknet/index.mdx b/docs/src/pages/components-explorer/components/darknet/index.mdx
index 0e37552ae..9e42ee25e 100644
--- a/docs/src/pages/components-explorer/components/darknet/index.mdx
+++ b/docs/src/pages/components-explorer/components/darknet/index.mdx
@@ -4,6 +4,7 @@ import Tabs from "@theme/Tabs";
import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
import ObjectDetector from "@site/src/pages/components-explorer/_domains/object_detector/index.mdx";
import ObjectDetectorLabels from "@site/src/pages/components-explorer/_domains/object_detector/labels.mdx";
import ObjectDetectorMask from "@site/src/pages/components-explorer/_domains/object_detector/mask.mdx";
@@ -213,3 +214,7 @@ The `dnn_backend` and `dnn_target` controls how the model runs.
:::info
When not running on CUDA, OpenCVs implementation of Darknet is used.
:::
+
+## Troubleshooting
+
+<ComponentTroubleshooting meta={ComponentMetadata} />
diff --git a/docs/src/pages/components-explorer/components/deepstack/config.json b/docs/src/pages/components-explorer/components/deepstack/config.json
+    {
+      "type": "boolean",
+      "name": "store",
+      "description": "If set to <code>true</code>, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with <code>trigger_recorder</code> set to <code>true</code> will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
@@ -285,6 +299,20 @@
"optional": true,
"default": true
},
+ {
+ "type": "boolean",
+ "name": "store",
+ "description": "If set to true
, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with trigger_recorder
set to true
will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
diff --git a/docs/src/pages/components-explorer/components/deepstack/index.mdx b/docs/src/pages/components-explorer/components/deepstack/index.mdx
index 255672701..a90f3718a 100644
--- a/docs/src/pages/components-explorer/components/deepstack/index.mdx
+++ b/docs/src/pages/components-explorer/components/deepstack/index.mdx
@@ -3,6 +3,7 @@ import CodeBlock from "@theme/CodeBlock";
import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
import FaceRecognition from "@site/src/pages/components-explorer/_domains/face_recognition/index.mdx";
import FaceRecognitionLabels from "@site/src/pages/components-explorer/_domains/face_recognition/labels.mdx";
import FaceRecognitionTrain from "@site/src/pages/components-explorer/_domains/face_recognition/train.mdx";
@@ -98,3 +99,7 @@ deepstack:
### Train
+
+## Troubleshooting
+
+<ComponentTroubleshooting meta={ComponentMetadata} />
diff --git a/docs/src/pages/components-explorer/components/edgetpu/config.json b/docs/src/pages/components-explorer/components/edgetpu/config.json
+    {
+      "type": "boolean",
+      "name": "store",
+      "description": "If set to <code>true</code>, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with <code>trigger_recorder</code> set to <code>true</code> will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
@@ -255,6 +269,20 @@
"optional": true,
"default": true
},
+ {
+ "type": "boolean",
+ "name": "store",
+ "description": "If set to true
, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with trigger_recorder
set to true
will always be stored when a recording starts, regardless of this setting.",
+ "optional": true,
+ "default": true
+ },
+ {
+ "type": "integer",
+ "name": "store_interval",
+ "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.",
+ "optional": true,
+ "default": 60
+ },
{
"type": "boolean",
"name": "require_motion",
diff --git a/docs/src/pages/components-explorer/components/edgetpu/index.mdx b/docs/src/pages/components-explorer/components/edgetpu/index.mdx
index 9f6b2ef60..99a5873d1 100644
--- a/docs/src/pages/components-explorer/components/edgetpu/index.mdx
+++ b/docs/src/pages/components-explorer/components/edgetpu/index.mdx
@@ -1,5 +1,6 @@
import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
import ImageClassification from "@site/src/pages/components-explorer/_domains/image_classification/index.mdx";
import ImageClassificationLabels from "@site/src/pages/components-explorer/_domains/image_classification/labels.mdx";
import ObjectDetector from "@site/src/pages/components-explorer/_domains/object_detector/index.mdx";
@@ -102,3 +103,7 @@ It was chosen because it has high accuracy and low latency.
More image classification models can be found on the [coral.ai website](https://coral.ai/models/image-classification/)
There you will also find information to help you understand if you might want to switch to another model.
+
+## Troubleshooting
+
+<ComponentTroubleshooting meta={ComponentMetadata} />
diff --git a/docs/src/pages/components-explorer/components/ffmpeg/config.json b/docs/src/pages/components-explorer/components/ffmpeg/config.json
-  "name": "extension",
+  "name": {
+    "type": "deprecated",
+    "name": "extension",
+    "value": "Config option 'extension' is deprecated and will be removed in a future version. <code>mp4</code> is the only supported extension."
+  },
"description": "The file extension used for recordings.",
- "optional": true,
- "default": "mp4"
+ "deprecated": true,
+ "default": null
},
{
"type": "map",
@@ -716,16 +736,20 @@
{
"type": "boolean",
"name": "save_to_disk",
- "description": "If true
, the thumbnail that is created on start of recording is saved to {folder}/{camera_identifier}/latest_thumbnail.jpg
",
+ "description": "If true
, the thumbnail that is created on start of recording is saved to {camera_identifier}/latest_thumbnail.jpg
Full path depends on the storage component tier configuration.",
"optional": true,
"default": true
},
{
"type": "string",
- "name": "filename_pattern",
+ "name": {
+ "type": "deprecated",
+ "name": "filename_pattern",
+ "value": "Thumbnails are stored with the same filename as the recording ID in the database, for example: 1.jpg, 2.jpg, 3.jpg etc."
+ },
"description": "A strftime pattern for saved thumbnails.
Default pattern results in filenames like: 23:59:59.jpg
.",
- "optional": true,
- "default": "%H:%M:%S"
+ "deprecated": true,
+ "default": null
}
],
"name": "thumbnail",
@@ -733,6 +757,515 @@
"optional": true,
"default": null
},
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "list",
+ "values": [
+ [
+ {
+ "type": "string",
+ "name": "path",
+                "description": "Path to store files in. Cannot be <code>/tmp</code> or <code>/tmp/viseron</code>.",
+ "required": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "poll",
+ "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "boolean",
+ "name": "move_on_shutdown",
+ "description": "Move/delete files to the next tier when Viseron shuts down. Useful to not lose files when shutting down Viseron if using a RAM disk.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "continuous",
+ "description": "Retention rules for continuous recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "events",
+ "description": "Retention rules for event recordings.",
+ "optional": true,
+ "default": null
+ }
+ ]
+ ],
+ "lengthMin": 1,
+ "name": "tiers",
+ "description": "Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+ "required": true,
+ "default": null
+ }
+ ],
+ "name": "storage",
+ "description": "Storage options for the camera.
Overrides the configuration in the storage component.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "continuous",
+ "description": "Retention rules for continuous recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "events",
+ "description": "Retention rules for event recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "create_event_clip",
+ "description": "Concatenate fragments to an MP4 file for each event. WARNING: Will store both the fragments AND the MP4 file, using more storage space.",
+ "optional": true,
+ "default": false
+ },
{
"type": "list",
"values": [
@@ -797,10 +1330,14 @@
},
{
"type": "string",
- "name": "segments_folder",
+ "name": {
+ "type": "deprecated",
+ "name": "segments_folder",
+ "value": "Config option 'segments_folder' is deprecated and will be removed in a future version."
+ },
"description": "What folder to store FFmpeg segments in. Segments are used to produce recordings so you should not need to change this.",
- "optional": true,
- "default": "/segments"
+ "deprecated": true,
+ "default": null
},
{
"type": "select",
diff --git a/docs/src/pages/components-explorer/components/ffmpeg/index.mdx b/docs/src/pages/components-explorer/components/ffmpeg/index.mdx
index fc32b3915..31a40df07 100644
--- a/docs/src/pages/components-explorer/components/ffmpeg/index.mdx
+++ b/docs/src/pages/components-explorer/components/ffmpeg/index.mdx
@@ -3,6 +3,7 @@ import Tabs from "@theme/Tabs";
import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
import Camera from "@site/src/pages/components-explorer/_domains/camera/index.mdx";
import CameraMjpegStreams from "@site/src/pages/components-explorer/_domains/camera/mjpeg_streams.mdx";
@@ -223,6 +224,9 @@ Example Docker command
```bash
docker run --rm \
-v {recordings path}:/recordings \
+ -v {recordings path}:/segments \
+ -v {recordings path}:/snapshots \
+ -v {recordings path}:/thumbnails \
-v {config path}:/config \
-v /etc/localtime:/etc/localtime:ro \
-p 8888:8888 \
@@ -242,6 +246,9 @@ services:
container_name: viseron
volumes:
- {recordings path}:/recordings
+ - {recordings path}:/segments
+ - {recordings path}:/snapshots
+ - {recordings path}:/thumbnails
- {config path}:/config
- /etc/localtime:/etc/localtime:ro
ports:
@@ -251,8 +258,20 @@ services:
```
```yaml title="/config/config.yaml"
-recorder:
- segments_folder: /tmp
+storage:
+ recorder:
+ tiers:
+ # Store 50 MB of segments in RAM disk
+ - path: /tmp/tier1
+ move_on_shutdown: true # Important to not lose segments on shutdown
+ events:
+ max_size:
+ mb: 50
+ # Keep 50 GB of segments on a normal drive
+ - path: /config/tier2
+ events:
+ max_size:
+ gb: 50
```
@@ -347,8 +366,8 @@ ffmpeg:
fps: 1
raw_command: | # Output to pipe:1
ffmpeg -rtsp_transport tcp -i rtsp://user:pass@192.168.XX.X:554/onvif_camera/profile.1 -vf fps=1.0 -f rawvideo -pix_fmt nv12 pipe:1
- raw_command: | # Output segments to /segments/viseron_vscode_camera
- ffmpeg -rtsp_transport tcp -i rtsp://user:pass@192.168.XX.X:554/onvif_camera/profile.0 -f segment -segment_time 5 -reset_timestamps 1 -strftime 1 -c:v copy /segments/viseron_vscode_camera/%Y%m%d%H%M%S.mp4
+ raw_command: | # Output segments to /segments/camera_one
+ ffmpeg -rtsp_transport tcp -i rtsp://user:pass@192.168.XX.X:554/onvif_camera/profile.0 -f segment -segment_time 5 -reset_timestamps 1 -strftime 1 -c:v copy /segments/camera_one/%Y%m%d%H%M%S.mp4
```
@@ -364,3 +383,7 @@ Most of the configuration options are ignored when using `raw_command`.
If you create a command that works well for your particular hardware, please share it with the community!
:::
+
+## Troubleshooting
+
+<ComponentTroubleshooting meta={ComponentMetadata} />
diff --git a/docs/src/pages/components-explorer/components/gstreamer/config.json b/docs/src/pages/components-explorer/components/gstreamer/config.json
-  "name": "extension",
+  "name": {
+    "type": "deprecated",
+    "name": "extension",
+    "value": "Config option 'extension' is deprecated and will be removed in a future version. <code>mp4</code> is the only supported extension."
+  },
"description": "The file extension used for recordings.",
- "optional": true,
- "default": "mp4"
+ "deprecated": true,
+ "default": null
},
{
"type": "map",
@@ -459,16 +479,20 @@
{
"type": "boolean",
"name": "save_to_disk",
- "description": "If true
, the thumbnail that is created on start of recording is saved to {folder}/{camera_identifier}/latest_thumbnail.jpg
",
+ "description": "If true
, the thumbnail that is created on start of recording is saved to {camera_identifier}/latest_thumbnail.jpg
Full path depends on the storage component tier configuration.",
"optional": true,
"default": true
},
{
"type": "string",
- "name": "filename_pattern",
+ "name": {
+ "type": "deprecated",
+ "name": "filename_pattern",
+ "value": "Thumbnails are stored with the same filename as the recording ID in the database, for example: 1.jpg, 2.jpg, 3.jpg etc."
+ },
"description": "A strftime pattern for saved thumbnails.
Default pattern results in filenames like: 23:59:59.jpg
.",
- "optional": true,
- "default": "%H:%M:%S"
+ "deprecated": true,
+ "default": null
}
],
"name": "thumbnail",
@@ -476,6 +500,515 @@
"optional": true,
"default": null
},
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "list",
+ "values": [
+ [
+ {
+ "type": "string",
+ "name": "path",
+                "description": "Path to store files in. Cannot be <code>/tmp</code> or <code>/tmp/viseron</code>.",
+ "required": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "poll",
+ "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "boolean",
+ "name": "move_on_shutdown",
+ "description": "Move/delete files to the next tier when Viseron shuts down. Useful to not lose files when shutting down Viseron if using a RAM disk.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "continuous",
+ "description": "Retention rules for continuous recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                      "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                      "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "events",
+ "description": "Retention rules for event recordings.",
+ "optional": true,
+ "default": null
+ }
+ ]
+ ],
+ "lengthMin": 1,
+ "name": "tiers",
+ "description": "Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+ "required": true,
+ "default": null
+ }
+ ],
+ "name": "storage",
+ "description": "Storage options for the camera.
Overrides the configuration in the storage component.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "continuous",
+ "description": "Retention rules for continuous recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+            "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+            "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "events",
+ "description": "Retention rules for event recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "create_event_clip",
+ "description": "Concatenate fragments to an MP4 file for each event. WARNING: Will store both the fragments AND the MP4 file, using more storage space.",
+ "optional": true,
+ "default": false
+ },
{
"type": "list",
"values": [
@@ -540,10 +1073,14 @@
},
{
"type": "string",
- "name": "segments_folder",
+ "name": {
+ "type": "deprecated",
+ "name": "segments_folder",
+ "value": "Config option 'segments_folder' is deprecated and will be removed in a future version."
+ },
"description": "What folder to store GStreamer segments in. Segments are used to produce recordings so you should not need to change this.",
- "optional": true,
- "default": "/segments"
+ "deprecated": true,
+ "default": null
},
{
"type": "select",
diff --git a/docs/src/pages/components-explorer/components/gstreamer/index.mdx b/docs/src/pages/components-explorer/components/gstreamer/index.mdx
index 8ab64e434..572696862 100644
--- a/docs/src/pages/components-explorer/components/gstreamer/index.mdx
+++ b/docs/src/pages/components-explorer/components/gstreamer/index.mdx
@@ -1,7 +1,9 @@
import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
import Camera from "@site/src/pages/components-explorer/_domains/camera/index.mdx";
import CameraMjpegStreams from "@site/src/pages/components-explorer/_domains/camera/mjpeg_streams.mdx";
+
import ComponentMetadata from "./_meta";
import config from "./config.json";
@@ -168,3 +170,7 @@ The third consideration is that small segments need to be saved to disk for proc
This is done by using the `splitmuxsink` element.
Last but not least, if you create a pipeline that works well for your particular hardware, please consider contributing it to Viseron, either by opening a PR or an issue.
+
+## Troubleshooting
+
+<ComponentTroubleshooting meta={ComponentMetadata} />
diff --git a/docs/src/pages/components-explorer/components/storage/config.json b/docs/src/pages/components-explorer/components/storage/config.json
+            {
+              "type": "string",
+              "name": "path",
+              "description": "Path to store files in. Cannot be <code>/tmp</code> or <code>/tmp/viseron</code>.",
+ "required": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "poll",
+ "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "boolean",
+ "name": "move_on_shutdown",
+ "description": "Move/delete files to the next tier when Viseron shuts down. Useful to not lose files when shutting down Viseron if using a RAM disk.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                  "description": "Min size in GB. Added together with <code>min_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                  "description": "Min size in MB. Added together with <code>min_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+                  "description": "Max size in GB. Added together with <code>max_mb</code>.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+                  "description": "Max size in MB. Added together with <code>max_gb</code>.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "continuous",
+ "description": "Retention rules for continuous recordings.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Min size in GB. Added together with min_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Min size in MB. Added together with min_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Max size in GB. Added together with max_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Max size in MB. Added together with max_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "events",
+ "description": "Retention rules for event recordings.",
+ "optional": true,
+ "default": null
+ }
+ ]
+ ],
+ "lengthMin": 1,
+ "name": "tiers",
+ "description": "Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+ "optional": true,
+ "default": [
+ {
+ "path": "/",
+ "events": {
+ "max_age": {
+ "days": 7
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "name": "recorder",
+ "description": "Configuration for recordings.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "list",
+ "values": [
+ [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Min size in GB. Added together with min_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Min size in MB. Added together with min_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Max size in GB. Added together with max_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Max size in MB. Added together with max_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "string",
+ "name": "path",
+ "description": "Path to store files in. Cannot be /tmp
or /tmp/viseron
.",
+ "required": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "poll",
+ "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "boolean",
+ "name": "move_on_shutdown",
+ "description": "Move/delete files to the next tier when Viseron shuts down. Useful to not lose files when shutting down Viseron if using a RAM disk.",
+ "optional": true,
+ "default": false
+ }
+ ]
+ ],
+ "lengthMin": 1,
+ "name": "tiers",
+ "description": "Default tiers for all domains, unless overridden in the domain configuration.
Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+ "optional": true,
+ "default": [
+ {
+ "path": "/",
+ "max_age": {
+ "days": 7
+ }
+ }
+ ]
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "list",
+ "values": [
+ [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Min size in GB. Added together with min_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Min size in MB. Added together with min_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Max size in GB. Added together with max_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Max size in MB. Added together with max_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "string",
+ "name": "path",
+ "description": "Path to store files in. Cannot be /tmp
or /tmp/viseron
.",
+ "required": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "poll",
+ "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "boolean",
+ "name": "move_on_shutdown",
+ "description": "Move/delete files to the next tier when Viseron shuts down. Useful to not lose files when shutting down Viseron if using a RAM disk.",
+ "optional": true,
+ "default": false
+ }
+ ]
+ ],
+ "lengthMin": 1,
+ "name": "tiers",
+ "description": "Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+ "required": true,
+ "default": null
+ }
+ ],
+ "name": "face_recognition",
+ "description": "Override the default snapshot tiers for face recognition. If not set, the default tiers will be used.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "list",
+ "values": [
+ [
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Min size in GB. Added together with min_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Min size in MB. Added together with min_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_size",
+ "description": "Minimum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "float",
+ "name": "gb",
+ "description": "Max size in GB. Added together with max_mb
.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "float",
+ "name": "mb",
+ "description": "Max size in MB. Added together with max_gb
.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_size",
+ "description": "Maximum size of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Max age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Max age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Max age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "max_age",
+ "description": "Maximum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "map",
+ "value": [
+ {
+ "type": "integer",
+ "name": "days",
+ "description": "Min age in days.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "hours",
+ "description": "Min age in hours.",
+ "optional": true,
+ "default": null
+ },
+ {
+ "type": "integer",
+ "name": "minutes",
+ "description": "Min age in minutes.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "min_age",
+ "description": "Minimum age of files to keep in this tier.",
+ "optional": true,
+ "default": {}
+ },
+ {
+ "type": "string",
+ "name": "path",
+ "description": "Path to store files in. Cannot be /tmp
or /tmp/viseron
.",
+ "required": true,
+ "default": null
+ },
+ {
+ "type": "boolean",
+ "name": "poll",
+ "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.",
+ "optional": true,
+ "default": false
+ },
+ {
+ "type": "boolean",
+ "name": "move_on_shutdown",
+ "description": "Move/delete files to the next tier when Viseron shuts down. Useful to not lose files when shutting down Viseron if using a RAM disk.",
+ "optional": true,
+ "default": false
+ }
+ ]
+ ],
+ "lengthMin": 1,
+ "name": "tiers",
+ "description": "Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+ "required": true,
+ "default": null
+ }
+ ],
+ "name": "object_detection",
+ "description": "Override the default snapshot tiers for object detection. If not set, the default tiers will be used.",
+ "optional": true,
+ "default": null
+ }
+ ],
+ "name": "snapshots",
+ "description": "Snapshots are images taken when events are triggered or post processors finds anything. Snapshots will be taken for object detection, motiond detection, and any post processor that scans the image, for example face and license plate recognition.",
+ "optional": true,
+ "default": {}
+ }
+ ],
+ "name": "storage",
+ "description": "Storage configuration.",
+ "required": true,
+ "default": {}
+ }
+]
\ No newline at end of file
diff --git a/docs/src/pages/components-explorer/components/storage/index.mdx b/docs/src/pages/components-explorer/components/storage/index.mdx
new file mode 100644
index 000000000..02e4929e4
--- /dev/null
+++ b/docs/src/pages/components-explorer/components/storage/index.mdx
@@ -0,0 +1,179 @@
+import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration";
+import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader";
+import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting";
+
+import ComponentMetadata from "./_meta";
+import config from "./config.json";
+
+Configuration example
+
+```yaml
+storage:
+ recorder:
+ tiers:
+ - path: /ssd/tier1
+ events:
+ max_age:
+ days: 1
+ continuous:
+ max_age:
+ days: 1
+ - path: /hdd/tier2
+ events:
+ max_age:
+ days: 7
+ snapshots:
+ tiers:
+ - path: /config/tier1
+ max_age:
+ days: 1
+```
+
+:::tip
+
+The above example stores event recordings on `/ssd/tier1` until they are 1 day old, then moves them to `/hdd/tier2`, where they are deleted once they are 7 days old.
+
+It will also store continuous recordings on `/ssd/tier1` for 1 day, after which they are deleted.
+
+:::
+
+:::info
+
+In the Configuration example above, a file is therefore completely deleted after **7 days**, not 8: `max_age` is measured from the file's original creation time, not from when it entered the current tier.
+
+:::
+
+## Tiers
+
+Tiers are a list of directories that Viseron can use to store files.
+
+Viseron always writes to the first tier; `max_size`/`max_age` then decide when files are moved to the next tier or deleted.
+
+:::danger
+
+The first tier **must** be a local disk or a RAM disk; it **cannot** be a network share/NTFS mount, as Viseron would then be unable to detect new files or gather file metadata. The other tiers can be network shares/NTFS mounts.
+
+:::
+
+When the `max_size` is hit the oldest files will be moved to the next tier.
+When the age of a file exceeds the `max_age`, it will be moved to the next tier.
+If the current tier is the last one, the file is deleted instead.
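+
+As a minimal sketch (the paths here are illustrative, not defaults), the following keeps events on a fast disk until the tier holds 5 GB or a file turns 1 day old, then moves them to a slower disk where they are deleted at 14 days of age:
+
+```yaml
+storage:
+  recorder:
+    tiers:
+      - path: /mnt/fast
+        events:
+          max_size:
+            gb: 5
+          max_age:
+            days: 1
+      - path: /mnt/slow
+        events:
+          max_age:
+            days: 14
+```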
+
+:::caution
+
+If you change a tier path, you need to manually move the files to the new path.
+
+:::
+
+:::info
+
+For technical reasons, a tier is likely to hold a few megabytes more than `max_size` allows, so leave some extra space on the disk.
+
+:::
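+
+As a worked example of how sizes are computed (based on the `calculate_bytes` helper in the storage component, which converts using binary multiples and adds `gb` and `mb` together): a `max_size` of `gb: 10` equals 10 × 1024³ = 10,737,418,240 bytes, and adding `mb: 512` would contribute another 512 × 1024² bytes on top of that.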
+
+### Continuous (24/7) recordings
+
+To enable continuous recordings for **all** cameras, use the `continuous` configuration option for a tier.
+The example below stores 10 GB of continuous recordings in the default location `/segments`.
+No events will be stored.
+
+Continuous recordings configuration example
+
+```yaml title="/config/config.yaml"
+storage:
+ recorder:
+ tiers:
+ - path: / # Will store segments in /segments folder inside the container
+ continuous:
+ max_size:
+ gb: 10
+```
+
+
+:::info
+
+See also the [GStreamer](/components-explorer/components/GStreamer) component.
+
+:::
+
+### Advanced configuration
+
+For more advanced setups, `min_size` and `min_age` can be used.
+They are best explained in a `config.yaml` example:
+
+Advanced configuration example
+
+```yaml title="/config/config.yaml"
+storage:
+ recorder:
+ tiers:
+ - path: /mnt/ramdisk # Store continuous recordings and events here
+ move_on_shutdown: true
+ continuous:
+ max_size: # If this target is hit, segments will be moved to the next tier
+ gb: 1
+ max_age: # If this target is hit, segments will be moved to the next tier
+ days: 1
+ events:
+ max_size: # If this target is hit, segments will be moved to the next tier
+ gb: 1
+ max_age: # If this target is hit, segments will be moved to the next tier
+ days: 1
+ - path: /mnt/ssd # Store only events here
+ events:
+ min_size: # If max_age is hit, keep at least 1 GB
+ gb: 1
+ max_size: # If this target is hit, segments will be moved to the next tier
+ gb: 10
+ max_age: # If this target is hit, segments will be moved to the next tier ONLY if size is larger than min_size
+ days: 7
+ - path: /mnt/nas # Store only events here
+ events:
+ min_age: # If max_size is hit, keep at least 30 days' worth of recordings. DOES NOT AFFECT max_age
+ days: 30
+ max_size: # If this target is hit, recordings/segments will be DELETED ONLY if they are older than 30 days
+ gb: 100
+```
+
+:::tip
+
+The same strategy can be applied to snapshots (images of detected objects, faces, etc.).
+
+:::
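+
+For example (a sketch; the `face_recognition` override comes from the `snapshots` options in the configuration reference above, and the paths are illustrative), face recognition snapshots can be kept longer than other snapshots:
+
+```yaml
+storage:
+  snapshots:
+    tiers:
+      - path: /config
+        max_age:
+          days: 2
+    face_recognition:
+      tiers:
+        - path: /config
+          max_age:
+            days: 14
+```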
+
+
 Image Classification
 License Plate Recognition
- Hardware Acceleration
+ Hardware Acceleration
+ 24/7 recordings

continuous
: Will save everything but highlight Events.

events
: Will only save Events.

+ "... trigger_recorder ... and ends when either no "
+ "objects or no motion (or both) is detected, depending on the configuration."
+)
+DESC_RECORDER_TIERS = (
+ "Tiers are used to move files between different storage locations. "
+ "When a file reaches the max age or max size of a tier, it will be moved to the "
+ "next tier. "
+ "If the file is already in the last tier, it will be deleted. "
+)
+DESC_SNAPSHOTS = (
+ "Snapshots are images taken when events are triggered or post processors finds "
+ "anything. "
+ "Snapshots will be taken for object detection, motiond detection, and any post "
+ "processor that scans the image, for example face and license plate recognition."
+)
+DESC_SNAPSHOTS_TIERS = (
+ "Default tiers for all domains, unless overridden in the domain configuration.min_mb
."
+DESC_MIN_MB = "Min size in MB. Added together with min_gb
."
+DESC_MAX_GB = "Max size in GB. Added together with max_mb
."
+DESC_MAX_MB = "Max size in MB. Added together with max_gb
."
+
+DESC_MIN_DAYS = "Min age in days."
+DESC_MIN_HOURS = "Min age in hours."
+DESC_MIN_MINUTES = "Min age in minutes."
+DESC_MAX_DAYS = "Max age in days."
+DESC_MAX_HOURS = "Max age in hours."
+DESC_MAX_MINUTES = "Max age in minutes."
+DESC_PATH = (
+ "Path to store files in. Cannot be /tmp
or /tmp/viseron
."
+)
+DESC_POLL = (
+ "Poll the file system for new files. "
+ "Much slower than non-polling but required for some file systems like NTFS mounts."
+)
+DESC_MOVE_ON_SHUTDOWN = (
+ "Move/delete files to the next tier when Viseron shuts down. "
+ "Useful to not lose files when shutting down Viseron if using a RAM disk."
+)
+DESC_MIN_SIZE = "Minimum size of files to keep in this tier."
+DESC_MAX_SIZE = "Maximum size of files to keep in this tier."
+DESC_MIN_AGE = "Minimum age of files to keep in this tier."
+DESC_MAX_AGE = "Maximum age of files to keep in this tier."
+DESC_CONTINUOUS = "Retention rules for continuous recordings."
+DESC_EVENTS = "Retention rules for event recordings."
diff --git a/viseron/components/storage/models.py b/viseron/components/storage/models.py
new file mode 100644
index 000000000..992a46ba3
--- /dev/null
+++ b/viseron/components/storage/models.py
@@ -0,0 +1,179 @@
+"""Database models for storage component."""
+from __future__ import annotations
+
+import datetime
+from typing import Callable, Dict, Literal, Optional
+
+from sqlalchemy import DateTime, Float, Integer, LargeBinary, String, types
+from sqlalchemy.dialects.postgresql import JSONB
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column
+from sqlalchemy.sql import expression
+
+ColumnMeta = Dict[str, str]
+
+
+class UTCDateTime(types.TypeDecorator):
+ """A DateTime type which can only store UTC datetimes."""
+
+ impl = DateTime
+ cache_ok = True
+
+ def process_bind_param(self, value, _dialect):
+ """Remove timezone info from datetime."""
+ if isinstance(value, datetime.datetime):
+ return value.replace(tzinfo=None)
+ return value
+
+ def process_result_value(self, value, _dialect):
+ """Add timezone info to datetime."""
+ if isinstance(value, datetime.datetime):
+ return value.replace(tzinfo=datetime.timezone.utc)
+ return value
+
+
+class UTCNow(expression.FunctionElement):
+ """Return the current timestamp in UTC."""
+
+ type = UTCDateTime()
+ inherit_cache = True
+
+
+@compiles(UTCNow, "postgresql")
+def pg_utcnow(
+ _element, _compiler, **_kw
+) -> Literal["TIMEZONE('utc', CURRENT_TIMESTAMP)"]:
+ """Compile utcnow function for postgresql."""
+ return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
+
+
+class Base(DeclarativeBase):
+ """Base class for database models."""
+
+ type_annotation_map = {ColumnMeta: JSONB}
+
+
+class Files(Base):
+ """Database model for files."""
+
+ __tablename__ = "files"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ tier_id: Mapped[int] = mapped_column(Integer)
+ tier_path: Mapped[str] = mapped_column(String)
+ camera_identifier: Mapped[str] = mapped_column(String)
+ category: Mapped[str] = mapped_column(String)
+ subcategory: Mapped[str] = mapped_column(String)
+ path: Mapped[str] = mapped_column(String, unique=True)
+ directory: Mapped[str] = mapped_column(String)
+ filename: Mapped[str] = mapped_column(String)
+ size: Mapped[int] = mapped_column(Integer)
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+
+class FilesMeta(Base):
+ """Database model for files metadata.
+
+ Used to store arbitrary metadata about files.
+ """
+
+ __tablename__ = "files_meta"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ path: Mapped[str] = mapped_column(String, unique=True)
+ orig_ctime = mapped_column(UTCDateTime(timezone=False), nullable=False)
+ meta: Mapped[ColumnMeta] = mapped_column(JSONB)
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+
+class Recordings(Base):
+ """Database model for recordings."""
+
+ __tablename__ = "recordings"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ camera_identifier: Mapped[str] = mapped_column(String)
+ start_time: Mapped[datetime.datetime] = mapped_column(UTCDateTime(timezone=False))
+ end_time: Mapped[Optional[datetime.datetime]] = mapped_column(
+ UTCDateTime(timezone=False), nullable=True
+ )
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+ trigger_type: Mapped[Optional[str]] = mapped_column(String, nullable=True)
+ trigger_id: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
+ thumbnail_path: Mapped[str] = mapped_column(String, nullable=True)
+
+ def get_fragments(
+ self, lookback: float, get_session: Callable[[], Session], now=None
+ ):
+ """Get all files for this recording.
+
+ Local import to avoid circular imports.
+ """
+ # pylint: disable-next=import-outside-toplevel
+ from viseron.components.storage.queries import get_recording_fragments
+
+ return get_recording_fragments(self.id, lookback, get_session, now)
+
+
+class Objects(Base):
+ """Database model for objects."""
+
+ __tablename__ = "objects"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ camera_identifier: Mapped[str] = mapped_column(String)
+ label: Mapped[str] = mapped_column(String)
+ confidence: Mapped[float] = mapped_column(Float)
+ width: Mapped[float] = mapped_column(Float)
+ height: Mapped[float] = mapped_column(Float)
+ x1: Mapped[float] = mapped_column(Float)
+ y1: Mapped[float] = mapped_column(Float)
+ x2: Mapped[float] = mapped_column(Float)
+ y2: Mapped[float] = mapped_column(Float)
+ snapshot_path: Mapped[str] = mapped_column(String, nullable=True)
+ zone: Mapped[Optional[str]] = mapped_column(String, nullable=True)
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+
+class Motion(Base):
+ """Database model for motion."""
+
+ __tablename__ = "motion"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ camera_identifier: Mapped[str] = mapped_column(String)
+ start_time: Mapped[datetime.datetime] = mapped_column(UTCDateTime(timezone=False))
+ end_time: Mapped[Optional[datetime.datetime]] = mapped_column(
+ UTCDateTime(timezone=False), nullable=True
+ )
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+
+class MotionContours(Base):
+ """Database model for motion contours."""
+
+ __tablename__ = "motion_contours"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ motion_id: Mapped[int] = mapped_column(Integer)
+ contour: Mapped[LargeBinary] = mapped_column(LargeBinary)
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+
+class Events(Base):
+ """Database model for dispatched events."""
+
+ __tablename__ = "events"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ name: Mapped[str] = mapped_column(String)
+ data: Mapped[ColumnMeta] = mapped_column(JSONB)
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
diff --git a/viseron/components/storage/queries.py b/viseron/components/storage/queries.py
new file mode 100644
index 000000000..4ed564a7c
--- /dev/null
+++ b/viseron/components/storage/queries.py
@@ -0,0 +1,364 @@
+"""Database queries."""
+from __future__ import annotations
+
+import datetime
+import logging
+from typing import Callable
+
+from sqlalchemy import (
+ Integer,
+ String,
+ TextualSelect,
+ and_,
+ cast,
+ column,
+ desc,
+ func,
+ or_,
+ select,
+ text,
+)
+from sqlalchemy.dialects.postgresql import INTERVAL
+from sqlalchemy.orm import Session
+from sqlalchemy.sql.functions import coalesce, concat
+
+from viseron.components.storage.models import Files, FilesMeta, Recordings
+from viseron.helpers import utcnow
+
+LOGGER = logging.getLogger(__name__)
+
+
+def files_to_move_query(
+ category: str,
+ subcategory: str,
+ tier_id: int,
+ camera_identifier: str,
+ max_bytes: int,
+ min_age_timestamp: float,
+ min_bytes: int,
+ max_age_timestamp: float,
+) -> TextualSelect:
+ """Return query for files to move to next tier or delete."""
+ LOGGER.debug(
+ "Files to move query bindparms: "
+ "category(%s), subcategory(%s), tier_id(%s), camera_identifier(%s), "
+ "max_bytes(%s), min_age_timestamp(%s), min_bytes(%s), max_age_timestamp(%s)",
+ category,
+ subcategory,
+ tier_id,
+ camera_identifier,
+ max_bytes,
+ min_age_timestamp,
+ min_bytes,
+ max_age_timestamp,
+ )
+ return (
+ text(
+ """--sql
+ WITH size_sum AS (
+ SELECT f.id
+ ,f.tier_id
+ ,f.tier_path
+ ,f.camera_identifier
+ ,f.category
+ ,f.subcategory
+ ,f.path
+ ,fm.orig_ctime
+ ,sum(f.size) FILTER (
+ WHERE f.category = :category
+ AND f.subcategory = :subcategory
+ AND f.tier_id = :tier_id
+ AND f.camera_identifier = :camera_identifier
+ ) OVER win1 AS total_bytes
+ FROM files f
+ JOIN files_meta fm
+ ON f.path = fm.path
+ WINDOW win1 AS (
+ PARTITION BY f.category, f.tier_id
+ ORDER BY fm.orig_ctime DESC
+ RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+ )
+ ORDER BY fm.orig_ctime DESC
+ )
+ SELECT id, path, tier_path
+ FROM size_sum
+ WHERE tier_id = :tier_id
+ AND camera_identifier = :camera_identifier
+ AND category = :category
+ AND subcategory = :subcategory
+ AND (
+ :max_bytes > 0 AND
+ total_bytes >= :max_bytes AND
+ orig_ctime <= to_timestamp(:min_age_timestamp) AT TIME ZONE 'UTC'
+ ) OR (
+ :max_age_timestamp > 0 AND
+ orig_ctime <= to_timestamp(:max_age_timestamp) AT TIME ZONE 'UTC' AND
+ total_bytes >= :min_bytes
+ )
+ ORDER BY orig_ctime ASC;
+ """
+ )
+ .bindparams(
+ category=category,
+ subcategory=subcategory,
+ tier_id=tier_id,
+ camera_identifier=camera_identifier,
+ max_bytes=max_bytes,
+ min_age_timestamp=min_age_timestamp,
+ min_bytes=min_bytes,
+ max_age_timestamp=max_age_timestamp,
+ )
+ .columns(
+ column("id", Integer),
+ column("path", String),
+ column("tier_path", String),
+ )
+ )
+
+
+def recordings_to_move_query(
+ segment_length: int,
+ tier_id: int,
+ camera_identifier: str,
+ lookback: int,
+ max_bytes: int,
+ min_age_timestamp: float,
+ min_bytes: int,
+ max_age_timestamp: float,
+ file_min_age_timestamp: float,
+) -> TextualSelect:
+ """Return query for segments to move to next tier or delete."""
+ LOGGER.debug(
+ "Recordings to move query bindparms: "
+ "segment_length(%s), tier_id(%s), camera_identifier(%s), "
+ "lookback(%s), max_bytes(%s), min_age_timestamp(%s), min_bytes(%s), "
+ "max_age_timestamp(%s), file_min_age_timestamp(%s)",
+ segment_length,
+ tier_id,
+ camera_identifier,
+ lookback,
+ max_bytes,
+ min_age_timestamp,
+ min_bytes,
+ max_age_timestamp,
+ file_min_age_timestamp,
+ )
+ return (
+ text(
+ """--sql
+ WITH recording_files as (
+ SELECT f.id as file_id
+ ,f.tier_id
+ ,f.tier_path
+ ,f.camera_identifier
+ ,f.category
+ ,f.subcategory
+ ,f.path
+ ,f.size
+ ,r.id as recording_id
+ ,r.created_at as recording_created_at
+ ,meta.orig_ctime
+ FROM files f
+ JOIN files_meta meta
+ ON f.path = meta.path
+ LEFT JOIN recordings r
+ ON f.camera_identifier = r.camera_identifier
+ AND meta.orig_ctime BETWEEN
+ r.start_time - INTERVAL ':lookback sec'
+ - INTERVAL ':segment_length sec' AND
+ COALESCE(r.end_time + INTERVAL ':segment_length sec', now())
+ WHERE f.category = 'recorder'
+ -- Count the size of both segments and thumbnails
+ AND f.subcategory IN ('segments', 'thumbnails')
+ AND f.tier_id = :tier_id
+ AND f.camera_identifier = :camera_identifier
+ ),
+
+ recordings_size AS (
+ SELECT recording_id
+ ,sum(size) as recording_size
+ FROM recording_files
+ GROUP BY recording_id
+ ),
+
+ size_sum AS (
+ SELECT r.id
+ ,sum(rs.recording_size) OVER (
+ ORDER BY r.created_at DESC
+ ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+ ) AS total_bytes
+ FROM recordings r
+ JOIN recordings_size rs
+ ON r.id = rs.recording_id
+ )
+ SELECT DISTINCT ON (rf.file_id)
+ rf.recording_id
+ ,rf.file_id
+ ,rf.path
+ ,rf.tier_path
+ FROM recording_files rf
+ LEFT JOIN size_sum s
+ ON rf.recording_id = s.id
+ WHERE (
+ (
+ (
+ :max_bytes > 0 AND
+ s.total_bytes >= :max_bytes AND
+ rf.recording_created_at <= to_timestamp(:min_age_timestamp)
+ AT TIME ZONE 'UTC'
+ ) OR (
+ :max_age_timestamp > 0 AND
+ rf.recording_created_at <= to_timestamp(:max_age_timestamp)
+ AT TIME ZONE 'UTC' AND
+ s.total_bytes >= :min_bytes
+ )
+ ) OR s.total_bytes IS NULL
+ )
+ AND rf.orig_ctime <= to_timestamp(:file_min_age_timestamp) AT TIME ZONE 'UTC'
+ -- Only select segments and not thumbnails
+ AND rf.subcategory = 'segments'
+ ORDER BY rf.file_id ASC
+ ,rf.orig_ctime ASC
+ ,rf.recording_created_at ASC;
+ """
+ )
+ .bindparams(
+ tier_id=tier_id,
+ camera_identifier=camera_identifier,
+ segment_length=segment_length,
+ max_bytes=max_bytes,
+ min_age_timestamp=min_age_timestamp,
+ max_age_timestamp=max_age_timestamp,
+ min_bytes=min_bytes,
+ lookback=lookback,
+ file_min_age_timestamp=file_min_age_timestamp,
+ )
+ .columns(
+ column("recording_id", Integer),
+ column("file_id", Integer),
+ column("path", String),
+ column("tier_path", String),
+ )
+ )
+
+
+def get_recording_fragments(
+ recording_id,
+ lookback: float,
+ get_session: Callable[[], Session],
+ now=None,
+):
+ """Return a list of files for this recording.
+
+ We must sort on orig_ctime and not created_at as the created_at timestamp is
+ not accurate for m4s files that are created from the original mp4 file after
+ it has been recorded. The orig_ctime is the timestamp of the original mp4 file
+ and is therefore accurate.
+
+ Only the latest occurrence of each file is selected using the CTE row_number.
+ This is to accommodate the case where a file has been copied to a succeeding
+ tier but has not yet been deleted from the original tier.
+ """
+ row_number = (
+ func.row_number()
+ .over(partition_by=Files.filename, order_by=desc(Files.created_at))
+ .label("row_number")
+ )
+ recording_files = (
+ select(Files, FilesMeta)
+ .add_columns(row_number)
+ .join(Recordings, Files.camera_identifier == Recordings.camera_identifier)
+ .join(FilesMeta, Files.path == FilesMeta.path)
+ .where(Recordings.id == recording_id)
+ .where(Files.category == "recorder")
+ .where(Files.path.endswith(".m4s"))
+ .where(
+ or_(
+ # Fetch all files that start within the recording
+ FilesMeta.orig_ctime.between(
+ Recordings.start_time - datetime.timedelta(seconds=lookback),
+ coalesce(Recordings.end_time, now if now else utcnow()),
+ ),
+ # Fetch the first file that starts before the recording but
+ # ends during the recording
+ and_(
+ Recordings.start_time - datetime.timedelta(seconds=lookback)
+ >= FilesMeta.orig_ctime,
+ Recordings.start_time - datetime.timedelta(seconds=lookback)
+ <= FilesMeta.orig_ctime
+ + cast(
+ concat(FilesMeta.meta["m3u8"]["EXTINF"], " sec"),
+ INTERVAL,
+ ),
+ ),
+ )
+ )
+ .order_by(FilesMeta.orig_ctime.asc())
+ .cte("recording_files")
+ )
+ stmt = (
+ select(recording_files)
+ .where(recording_files.c.row_number == 1)
+ .order_by(recording_files.c.orig_ctime.asc())
+ )
+ with get_session() as session:
+ fragments = session.execute(stmt).all()
+ return fragments
+
+
+def get_time_period_fragments(
+ camera_identifier: str,
+ start_timestamp: int,
+ end_timestamp: int | None,
+ get_session: Callable[[], Session],
+ now=None,
+):
+ """Return a list of files for the requested time period."""
+ start = datetime.datetime.utcfromtimestamp(start_timestamp)
+ if end_timestamp:
+ end = datetime.datetime.utcfromtimestamp(end_timestamp)
+ else:
+ end = now if now else utcnow()
+
+ row_number = (
+ func.row_number()
+ .over(partition_by=Files.filename, order_by=desc(Files.created_at))
+ .label("row_number")
+ )
+ files = (
+ select(Files, FilesMeta)
+ .add_columns(row_number)
+ .join(FilesMeta, Files.path == FilesMeta.path)
+ .where(Files.camera_identifier == camera_identifier)
+ .where(Files.category == "recorder")
+ .where(Files.path.endswith(".m4s"))
+ .where(FilesMeta.meta.comparator.has_key("m3u8")) # type: ignore[attr-defined]
+ .where(
+ or_(
+ # Fetch all files that start within the recording
+ FilesMeta.orig_ctime.between(
+ start,
+ end,
+ ),
+ # Fetch the first file that starts before the recording but
+ # ends during the recording
+ and_(
+ start >= FilesMeta.orig_ctime,
+ start
+ <= FilesMeta.orig_ctime
+ + cast(
+ concat(FilesMeta.meta["m3u8"]["EXTINF"], " sec"),
+ INTERVAL,
+ ),
+ ),
+ )
+ )
+ .order_by(FilesMeta.orig_ctime.asc())
+ .cte("files")
+ )
+ stmt = (
+ select(files).where(files.c.row_number == 1).order_by(files.c.orig_ctime.asc())
+ )
+ with get_session() as session:
+ fragments = session.execute(stmt).all()
+ return fragments
diff --git a/viseron/components/storage/tier_handler.py b/viseron/components/storage/tier_handler.py
new file mode 100644
index 000000000..1f1e33440
--- /dev/null
+++ b/viseron/components/storage/tier_handler.py
@@ -0,0 +1,789 @@
+"""Tier handler."""
+from __future__ import annotations
+
+import logging
+import os
+import shutil
+from datetime import datetime, timedelta
+from queue import Queue
+from threading import Lock, Timer
+from typing import TYPE_CHECKING, Any, Callable
+
+from sqlalchemy import Result, delete, insert, select, update
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import Session
+from watchdog.events import (
+ FileCreatedEvent,
+ FileDeletedEvent,
+ FileModifiedEvent,
+ FileSystemEvent,
+ FileSystemEventHandler,
+)
+from watchdog.observers import Observer
+from watchdog.observers.polling import PollingObserverVFS
+
+from viseron.components.storage.const import (
+ COMPONENT,
+ CONFIG_CONTINUOUS,
+ CONFIG_EVENTS,
+ CONFIG_MAX_AGE,
+ CONFIG_MAX_SIZE,
+ CONFIG_MIN_AGE,
+ CONFIG_MIN_SIZE,
+ CONFIG_MOVE_ON_SHUTDOWN,
+ CONFIG_PATH,
+ CONFIG_POLL,
+ MOVE_FILES_THROTTLE_SECONDS,
+)
+from viseron.components.storage.models import Files, FilesMeta, Recordings
+from viseron.components.storage.queries import (
+ files_to_move_query,
+ recordings_to_move_query,
+)
+from viseron.components.storage.util import (
+ calculate_age,
+ calculate_bytes,
+ files_to_move_overlap,
+ get_recorder_path,
+)
+from viseron.components.webserver.const import COMPONENT as WEBSERVER_COMPONENT
+from viseron.const import CAMERA_SEGMENT_DURATION, VISERON_SIGNAL_LAST_WRITE
+from viseron.domains.camera import FailedCamera
+from viseron.domains.camera.const import CONFIG_RECORDER, CONFIG_RETAIN
+from viseron.helpers import utcnow
+from viseron.watchdog.thread_watchdog import RestartableThread
+
+if TYPE_CHECKING:
+ from viseron import Viseron
+ from viseron.components.storage import Storage
+ from viseron.components.webserver import Webserver
+ from viseron.domains.camera import AbstractCamera
+
+
+class TierHandler(FileSystemEventHandler):
+ """Moves files up configured tiers."""
+
+ def __init__(
+ self,
+ vis: Viseron,
+ camera: AbstractCamera,
+ tier_id: int,
+ category: str,
+ subcategory: str,
+ tier: dict[str, Any],
+ next_tier: dict[str, Any] | None,
+ ) -> None:
+ self._logger = logging.getLogger(
+ f"{__name__}.{camera.identifier}.tier_{tier_id}"
+ )
+ super().__init__()
+
+ self._vis = vis
+ self._storage: Storage = vis.data[COMPONENT]
+ self._webserver: Webserver = self._vis.data[WEBSERVER_COMPONENT]
+ self._camera = camera
+ self._tier_id = tier_id
+ self._category = category
+ self._subcategory = subcategory
+ self._tier = tier
+ self._next_tier = next_tier
+
+ self.initialize()
+ vis.register_signal_handler(VISERON_SIGNAL_LAST_WRITE, self._shutdown)
+
+ self._pending_updates: dict[str, Timer] = {}
+ self._event_queue: Queue[FileSystemEvent | None] = Queue()
+ self._event_thread = RestartableThread(
+ target=self._process_events,
+ daemon=True,
+ name=f"tier_handler_{camera.identifier}",
+ stage=VISERON_SIGNAL_LAST_WRITE,
+ )
+ self._event_thread.start()
+
+ self._throttle_period = timedelta(
+ seconds=MOVE_FILES_THROTTLE_SECONDS,
+ )
+ self._time_of_last_call = utcnow()
+ self._check_tier_lock = Lock()
+
+ self._logger.debug("Tier %s monitoring path: %s", tier_id, self._path)
+ os.makedirs(self._path, exist_ok=True)
+ self._observer = (
+ PollingObserverVFS(stat=os.stat, listdir=os.scandir, polling_interval=1)
+ if tier[CONFIG_POLL]
+ else Observer()
+ )
+ self._observer.schedule(
+ self,
+ self._path,
+ recursive=True,
+ )
+ self._observer.start()
+
+ @property
+ def tier(self) -> dict[str, Any]:
+ """Tier configuration."""
+ return self._tier
+
+ def add_file_handler(self, path: str, pattern: str):
+ """Add file handler to webserver."""
+ self._logger.debug(f"Adding handler for /files{pattern}")
+ add_file_handler(
+ self._vis,
+ self._webserver,
+ path,
+ pattern,
+ self._camera,
+ self._category,
+ self._subcategory,
+ )
+
+ def initialize(self):
+ """Tier handler specific initialization."""
+ self._path = os.path.join(
+ self._tier[CONFIG_PATH],
+ self._category,
+ self._subcategory,
+ self._camera.identifier,
+ )
+
+ self._max_bytes = calculate_bytes(self._tier[CONFIG_MAX_SIZE])
+ self._min_bytes = calculate_bytes(self._tier[CONFIG_MIN_SIZE])
+ self._max_age = calculate_age(self._tier[CONFIG_MAX_AGE])
+ self._min_age = calculate_age(self._tier[CONFIG_MIN_AGE])
+
+ def check_tier(self) -> None:
+ """Check if file should be moved to next tier."""
+ now = utcnow()
+ with self._check_tier_lock:
+ time_since_last_call = now - self._time_of_last_call
+ if time_since_last_call > self._throttle_period:
+ self._time_of_last_call = now
+ else:
+ return
+ self._check_tier(self._storage.get_session)
+ self._time_of_last_call = now
+
+ def _check_tier(self, get_session: Callable[[], Session]) -> None:
+ file_ids = None
+ with get_session() as session:
+ file_ids = get_files_to_move(
+ session,
+ self._category,
+ self._subcategory,
+ self._tier_id,
+ self._camera.identifier,
+ self._max_bytes,
+ self._min_age,
+ self._min_bytes,
+ self._max_age,
+ )
+
+ if file_ids is not None:
+ for file in file_ids:
+ handle_file(
+ get_session,
+ self._storage,
+ self._camera.identifier,
+ self._tier,
+ self._next_tier,
+ file.path,
+ file.tier_path,
+ self._logger,
+ )
+ session.commit()
+
+ def _process_events(self) -> None:
+ while True:
+ event = self._event_queue.get()
+ if event is None:
+ self._logger.debug("Stopping event handler")
+ break
+ if isinstance(event, FileDeletedEvent):
+ self._on_deleted(event)
+ elif isinstance(event, FileCreatedEvent):
+ self._on_created(event)
+ elif isinstance(event, FileModifiedEvent):
+ self._on_modified(event)
+
+ def on_any_event(self, event: FileSystemEvent) -> None:
+ """Handle file system events."""
+ if os.path.basename(event.src_path) in self._storage.ignored_files:
+ return
+ self._event_queue.put(event)
+
+ def _on_created(self, event: FileCreatedEvent) -> None:
+ """Insert into database when file is created."""
+ self._logger.debug("File created: %s", event.src_path)
+ with self._storage.get_session() as session:
+ stmt = insert(Files).values(
+ tier_id=self._tier_id,
+ tier_path=self._tier[CONFIG_PATH],
+ camera_identifier=self._camera.identifier,
+ category=self._category,
+ subcategory=self._subcategory,
+ path=event.src_path,
+ directory=os.path.dirname(event.src_path),
+ filename=os.path.basename(event.src_path),
+ size=os.path.getsize(event.src_path),
+ )
+ session.execute(stmt)
+ session.commit()
+
+ self.check_tier()
+
+ def _on_modified(self, event: FileModifiedEvent) -> None:
+ """Update database when file is moved."""
+
+ def _update_size() -> None:
+ """Update the size of a file in the database.
+
+ Runs in a Timer to avoid spamming updates on duplicate events.
+ """
+ self._logger.debug("File modified (delayed event): %s", event.src_path)
+ self._pending_updates.pop(event.src_path, None)
+ try:
+ size = os.path.getsize(event.src_path)
+ except FileNotFoundError:
+ self._logger.debug("File not found: %s", event.src_path)
+ return
+
+ with self._storage.get_session() as session:
+ stmt = (
+ update(Files).where(Files.path == event.src_path).values(size=size)
+ )
+ session.execute(stmt)
+ session.commit()
+
+ self.check_tier()
+
+ if event.src_path in self._pending_updates:
+ self._pending_updates[event.src_path].cancel()
+ self._pending_updates[event.src_path] = Timer(1, _update_size)
+ self._pending_updates[event.src_path].start()
+
+ def _on_deleted(self, event: FileDeletedEvent) -> None:
+ """Remove file from database when it is deleted."""
+ self._logger.debug("File deleted: %s", event.src_path)
+ with self._storage.get_session() as session:
+ stmt = delete(Files).where(Files.path == event.src_path)
+ session.execute(stmt)
+ session.commit()
+
+ def _shutdown(self) -> None:
+ """Shutdown the observer and event handler."""
+ self._logger.debug("Stopping observer")
+ if self._tier[CONFIG_MOVE_ON_SHUTDOWN]:
+ self._logger.debug("Forcing move of files")
+ force_move_files(
+ self._storage,
+ self._storage.get_session,
+ self._category,
+ self._tier_id,
+ self._camera.identifier,
+ self._tier,
+ self._next_tier,
+ self._logger,
+ )
+ for pending_update in self._pending_updates.copy().values():
+ pending_update.join()
+ self._event_queue.put(None)
+ self._event_thread.join()
+ self._observer.stop()
+ self._observer.join()
+
+
+class RecorderTierHandler(TierHandler):
+ """Handle the recorder tiers."""
+
+ def initialize(self) -> None:
+ """Initialize recorder tier."""
+ self._path = get_recorder_path(self._tier, self._camera, self._subcategory)
+
+ self._continuous_max_bytes = calculate_bytes(
+ self._tier[CONFIG_CONTINUOUS][CONFIG_MAX_SIZE]
+ )
+ self._continuous_min_bytes = calculate_bytes(
+ self._tier[CONFIG_CONTINUOUS][CONFIG_MIN_SIZE]
+ )
+ self._continuous_max_age = calculate_age(
+ self._tier[CONFIG_CONTINUOUS][CONFIG_MAX_AGE]
+ )
+ self._continuous_min_age = calculate_age(
+ self._tier[CONFIG_CONTINUOUS][CONFIG_MIN_AGE]
+ )
+ self._continuous_params = [
+ self._continuous_max_bytes,
+ self._continuous_min_age,
+ self._continuous_min_bytes,
+ self._continuous_max_age,
+ ]
+
+ self._events_max_bytes = calculate_bytes(
+ self._tier[CONFIG_EVENTS][CONFIG_MAX_SIZE]
+ )
+ self._events_min_bytes = calculate_bytes(
+ self._tier[CONFIG_EVENTS][CONFIG_MIN_SIZE]
+ )
+ self._events_min_age = calculate_age(self._tier[CONFIG_EVENTS][CONFIG_MIN_AGE])
+
+ if self._tier_id == 0 and self._camera.config.get(CONFIG_RECORDER, {}).get(
+ CONFIG_RETAIN, None
+ ):
+ self._logger.warning(
+ f"Camera {self._camera.identifier} is using 'retain' for 'recorder' "
+ "which has been deprecated and will be removed in a future release. "
+ "Please use the new 'storage' component with the 'max_age' config "
+ "option instead. For now, the value of 'retain' will be used as "
+ "'max_age' for the first tier, but this WILL change and might cause "
+ "you to lose data."
+ )
+ self._events_max_age = timedelta(
+ days=self._camera.config[CONFIG_RECORDER][CONFIG_RETAIN]
+ )
+ else:
+ self._events_max_age = calculate_age(
+ self._tier[CONFIG_EVENTS][CONFIG_MAX_AGE]
+ )
+ self._events_params = [
+ self._events_max_bytes,
+ self._events_max_age,
+ self._events_min_bytes,
+ self._events_min_age,
+ ]
+
+ self.add_file_handler(self._path, rf"{self._path}/(.*.m4s$)")
+ self.add_file_handler(self._path, rf"{self._path}/(.*.mp4$)")
+
+ def _check_tier(self, get_session: Callable[[], Session]) -> None:
+ events_enabled = False
+ continuous_enabled = False
+ events_file_ids: Result[Any] | list = []
+ continuous_file_ids: Result[Any] | list = []
+ with get_session() as session:
+ if any(self._events_params):
+ events_enabled = True
+ events_file_ids = get_recordings_to_move(
+ session,
+ self._tier_id,
+ self._camera.identifier,
+ self._camera.recorder.lookback,
+ self._events_max_bytes,
+ self._events_min_age,
+ self._events_min_bytes,
+ self._events_max_age,
+ )
+
+ if any(self._continuous_params):
+ continuous_enabled = True
+ continuous_file_ids = get_files_to_move(
+ session,
+ self._category,
+ self._subcategory,
+ self._tier_id,
+ self._camera.identifier,
+ self._continuous_max_bytes,
+ self._continuous_min_age,
+ self._continuous_min_bytes,
+ self._continuous_max_age,
+ )
+
+ events_file_ids = list(events_file_ids)
+ # A file can be in multiple recordings, so we need to keep track of which
+ # files we have already processed using processed_paths
+ processed_paths = []
+ if events_enabled and not continuous_enabled:
+ for file in events_file_ids:
+ if file.path in processed_paths:
+ continue
+ handle_file(
+ get_session,
+ self._storage,
+ self._camera.identifier,
+ self._tier,
+ self._next_tier,
+ file.path,
+ file.tier_path,
+ self._logger,
+ )
+ processed_paths.append(file.path)
+ elif continuous_enabled and not events_enabled:
+ for file in continuous_file_ids:
+ handle_file(
+ get_session,
+ self._storage,
+ self._camera.identifier,
+ self._tier,
+ self._next_tier,
+ file.path,
+ file.tier_path,
+ self._logger,
+ )
+ else:
+ overlap = files_to_move_overlap(events_file_ids, continuous_file_ids)
+ for file in overlap:
+ if file.path in processed_paths:
+ continue
+ handle_file(
+ get_session,
+ self._storage,
+ self._camera.identifier,
+ self._tier,
+ self._next_tier,
+ file.path,
+ file.tier_path,
+ self._logger,
+ )
+ processed_paths.append(file.path)
+
+ recording_ids: list[int] = []
+ for recording in events_file_ids:
+ if (
+ recording.recording_id
+ and recording.recording_id not in recording_ids
+ ):
+ recording_ids.append(recording.recording_id)
+
+ # Signal to the thumbnail tier that the recording has been moved
+ if recording_ids:
+ self._logger.debug(
+ "Handle thumbnails for recordings: %s", recording_ids
+ )
+ for recording_id in recording_ids:
+ thumbnail_tier_handler: ThumbnailTierHandler = (
+ self._storage.camera_tier_handlers[self._camera.identifier][
+ self._category
+ ][self._tier_id]["thumbnails"]
+ )
+ thumbnail_tier_handler.move_thumbnail(recording_id)
+
+ # Delete recordings from Recordings table if this is the last tier
+ if recording_ids and self._next_tier is None:
+ self._logger.debug("Deleting recordings: %s", recording_ids)
+ with get_session() as _session:
+ stmt = delete(Recordings).where(Recordings.id.in_(recording_ids))
+ _session.execute(stmt)
+ _session.commit()
+
+ session.commit()
+
+
+class SnapshotTierHandler(TierHandler):
+ """Handle the snapshot tiers."""
+
+ def initialize(self):
+ """Initialize snapshot tier."""
+ super().initialize()
+ self.add_file_handler(self._path, rf"{self._path}/(.*.jpg$)")
+
+
+class ThumbnailTierHandler(TierHandler):
+ """Handle thumbnails."""
+
+ def initialize(self):
+ """Initialize thumbnail tier."""
+ self._path = os.path.join(
+ self._tier[CONFIG_PATH],
+ "thumbnails",
+ self._camera.identifier,
+ )
+ self.add_file_handler(self._path, rf"{self._path}/(.*.jpg$)")
+
+ def check_tier(self) -> None:
+ """Do nothing, as we don't want to move thumbnails."""
+
+ def on_any_event(self, event: FileSystemEvent) -> None:
+ """Ignore changes to latest_thumbnail.jpg."""
+ if os.path.basename(event.src_path) == "latest_thumbnail.jpg":
+ return
+ return super().on_any_event(event)
+
+ def _on_created(self, event: FileCreatedEvent) -> None:
+ try:
+ with self._storage.get_session() as session:
+ stmt = (
+ update(Recordings)
+ .where(
+ Recordings.id == os.path.basename(event.src_path).split(".")[0]
+ )
+ .values(thumbnail_path=event.src_path)
+ )
+ session.execute(stmt)
+ session.commit()
+ except Exception as error: # pylint: disable=broad-except
+ self._logger.error(
+ "Failed to update thumbnail path for recording with path: "
+ f"{event.src_path}: {error}"
+ )
+ super()._on_created(event)
+
+ def move_thumbnail(self, recording_id: int) -> None:
+ """Move thumbnail to next tier."""
+ with self._storage.get_session() as session:
+ sel = select(Recordings).where(Recordings.id == recording_id)
+ recording = session.execute(sel).scalar_one()
+ handle_file(
+ self._storage.get_session,
+ self._storage,
+ self._camera.identifier,
+ self._tier,
+ self._next_tier,
+ recording.thumbnail_path,
+ self._tier[CONFIG_PATH],
+ self._logger,
+ )
+ session.commit()
+
+
+def handle_file(
+ get_session: Callable[..., Session],
+ storage: Storage,
+ camera_identifier: str,
+ curr_tier: dict[str, Any],
+ next_tier: dict[str, Any] | None,
+ path: str,
+ tier_path: str,
+ logger: logging.Logger,
+) -> None:
+ """Move file if there is a succeeding tier, else delete the file."""
+ if path in storage.camera_requested_files_count[camera_identifier].filenames:
+ logger.debug("File %s is recently requested, skipping", path)
+ return
+
+ if next_tier is None:
+ delete_file(get_session, path, logger)
+ else:
+ new_path = path.replace(tier_path, next_tier[CONFIG_PATH], 1)
+ if new_path == path:
+ logger.warning(
+ "Failed to move file %s to next tier, new path is the same as old. "
+ "Viseron tries to mitigate this, but it can happen if you recently "
+ "changed the tier paths.",
+ path,
+ )
+ else:
+ move_file(
+ get_session,
+ path,
+ new_path,
+ logger,
+ )
+
+ # Delete the file from the database if tier_path is not the same as
+ # curr_tier[CONFIG_PATH]. This is an indication that the tier configuration
+ # has changed and since the old path is not monitored, the delete signal
+ # will not be received by Viseron
+ if tier_path != curr_tier[CONFIG_PATH]:
+ logger.debug(
+ "Deleting file %s from database since tier paths are different. "
+ "file tier_path: %s, current tier_path: %s",
+ path,
+ tier_path,
+ curr_tier[CONFIG_PATH],
+ )
+ with get_session() as session:
+ stmt = delete(Files).where(Files.path == path)
+ session.execute(stmt)
+ session.commit()
+
+
+def move_file(
+ get_session: Callable[..., Session],
+ src: str,
+ dst: str,
+ logger: logging.Logger,
+) -> None:
+ """Move file from src to dst.
+
+ To avoid race conditions where a file is referenced at the same time as it is being
+ moved, causing a 404 in the browser, we copy the file to the new location and then
+ delete the old one.
+ """
+ logger.debug("Moving file from %s to %s", src, dst)
+ try:
+ with get_session() as session:
+ sel = select(FilesMeta).where(FilesMeta.path == src)
+ res = session.execute(sel).scalar_one()
+ ins = insert(FilesMeta).values(
+ path=dst, meta=res.meta, orig_ctime=res.orig_ctime
+ )
+ session.execute(ins)
+ session.commit()
+ except IntegrityError:
+ logger.error(f"Failed to insert metadata for {dst}", exc_info=True)
+
+ try:
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copy(src, dst)
+ os.remove(src)
+ except FileNotFoundError as error:
+ logger.error(f"Failed to move file {src} to {dst}: {error}")
+ with get_session() as session:
+ stmt = delete(Files).where(Files.path == src)
+ session.execute(stmt)
+ session.commit()
+
+
+def delete_file(
+ get_session: Callable[..., Session],
+ path: str,
+ logger: logging.Logger,
+) -> None:
+ """Delete file."""
+ logger.debug("Deleting file %s", path)
+ with get_session() as session:
+ stmt = delete(Files).where(Files.path == path)
+ session.execute(stmt)
+ session.commit()
+
+ try:
+ os.remove(path)
+ except FileNotFoundError as error:
+ logger.error(f"Failed to delete file {path}: {error}")
+
+
+def get_files_to_move(
+ session: Session,
+ category: str,
+ subcategory: str,
+ tier_id: int,
+ camera_identifier: str,
+ max_bytes: int,
+ min_age: timedelta,
+ min_bytes: int,
+ max_age: timedelta,
+) -> Result[Any]:
+ """Get id of files to move."""
+ now = utcnow()
+
+ # If min_age is not set, we want to ignore files that are less than 5 seconds old
+ # This is to avoid moving files that are still being written to
+ if min_age:
+ min_age_timestamp = (now - min_age).timestamp()
+ else:
+ min_age_timestamp = (now - timedelta(seconds=5)).timestamp()
+
+ if max_age:
+ max_age_timestamp = (now - max_age).timestamp()
+ else:
+ max_age_timestamp = 0
+
+ stmt = files_to_move_query(
+ category,
+ subcategory,
+ tier_id,
+ camera_identifier,
+ max_bytes,
+ min_age_timestamp,
+ min_bytes,
+ max_age_timestamp,
+ )
+ result = session.execute(stmt)
+ return result
+
+
+def get_recordings_to_move(
+ session: Session,
+ tier_id: int,
+ camera_identifier: str,
+ lookback: int,
+ max_bytes: int,
+ min_age: timedelta,
+ min_bytes: int,
+ max_age: timedelta,
+ now: datetime | None = None,
+) -> Result[Any]:
+ """Get id of recordings and segments to move."""
+ if now is None:
+ now = utcnow()
+
+ min_age_timestamp = (now - min_age).timestamp()
+ if max_age:
+ max_age_timestamp = (now - max_age).timestamp()
+ else:
+ max_age_timestamp = 0
+
+ # We want to ignore files that are less than 5 times the
+ # segment duration old. This is to improve HLS streaming
+ file_min_age = (now - timedelta(seconds=CAMERA_SEGMENT_DURATION * 5)).timestamp()
+
+ stmt = recordings_to_move_query(
+ CAMERA_SEGMENT_DURATION,
+ tier_id,
+ camera_identifier,
+ lookback,
+ max_bytes,
+ min_age_timestamp,
+ min_bytes,
+ max_age_timestamp,
+ file_min_age,
+ )
+ result = session.execute(stmt)
+ return result
+
+
+def force_move_files(
+ storage: Storage,
+ get_session: Callable[..., Session],
+ category: str,
+ tier_id: int,
+ camera_identifier: str,
+ curr_tier: dict[str, Any],
+ next_tier: dict[str, Any] | None,
+ logger: logging.Logger,
+) -> None:
+ """Get and move/delete all files in tier."""
+ with get_session() as session:
+ stmt = (
+ select(Files)
+ .where(Files.category == category)
+ .where(Files.tier_id == tier_id)
+ .where(Files.camera_identifier == camera_identifier)
+ )
+ result = session.execute(stmt)
+ for file in result:
+ handle_file(
+ get_session,
+ storage,
+ camera_identifier,
+ curr_tier,
+ next_tier,
+ file.path,
+ file.tier_path,
+ logger,
+ )
+ session.commit()
+
+
+def add_file_handler(
+ vis: Viseron,
+ webserver: Webserver,
+ path: str,
+ pattern: str,
+ camera: AbstractCamera | FailedCamera,
+ category: str,
+ subcategory: str,
+) -> None:
+ """Add file handler to webserver."""
+ # We have to import this here to avoid circular imports
+ # pylint: disable-next=import-outside-toplevel
+ from viseron.components.webserver.tiered_file_handler import TieredFileHandler
+
+ webserver.application.add_handlers(
+ r".*",
+ [
+ (
+ (rf"/files{pattern}"),
+ TieredFileHandler,
+ {
+ "path": path,
+ "vis": vis,
+ "camera_identifier": camera.identifier,
+ "failed": bool(isinstance(camera, FailedCamera)),
+ "category": category,
+ "subcategory": subcategory,
+ },
+ )
+ ],
+ )
diff --git a/viseron/components/storage/triggers.py b/viseron/components/storage/triggers.py
new file mode 100644
index 000000000..08180fe77
--- /dev/null
+++ b/viseron/components/storage/triggers.py
@@ -0,0 +1,53 @@
+"""Set up database triggers."""
+
+import logging
+
+from sqlalchemy import Connection, delete, event
+from sqlalchemy.dialects.postgresql import insert
+
+from viseron.components.storage.models import Files, FilesMeta
+from viseron.helpers import utcnow
+
+LOGGER = logging.getLogger(__name__)
+
+
+def insert_into_files_meta(
+ conn: Connection,
+ clauseelement,
+ _multiparams,
+ _params,
+ _execution_options,
+) -> None:
+ """Insert a row into FilesMeta when a new row is inserted into Files."""
+ if clauseelement.is_insert and clauseelement.table.name == Files.__tablename__:
+ compiled = clauseelement.compile()
+ conn.execute(
+ insert(FilesMeta)
+ .values(
+ path=compiled.params["path"],
+ orig_ctime=utcnow(),
+ meta={},
+ )
+ .on_conflict_do_nothing(index_elements=["path"])
+ )
+
+
+def delete_from_files_meta(
+ conn: Connection,
+ clauseelement,
+ _multiparams,
+ _params,
+ _execution_options,
+) -> None:
+ """Delete a row from FilesMeta when a row is deleted from Files."""
+ if clauseelement.is_delete and clauseelement.table.name == Files.__tablename__:
+ compiled = clauseelement.compile()
+ conn.execute(
+ delete(FilesMeta).where(FilesMeta.path == compiled.params["path_1"])
+ )
+
+
+def setup_triggers(engine) -> None:
+ """Set up database triggers."""
+ event.listen(engine, "before_execute", insert_into_files_meta)
+ event.listen(engine, "after_execute", delete_from_files_meta)
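
The two listeners above keep `FilesMeta` rows in lockstep with `Files` at the engine level. A runnable sketch of the same pattern, assuming SQLAlchemy 2.x and SQLite toy tables rather than Viseron's models (the real code uses the PostgreSQL `insert ... on_conflict_do_nothing` variant):

```python
from sqlalchemy import Column, MetaData, String, Table, create_engine, event, insert

metadata = MetaData()
files = Table("files", metadata, Column("path", String, primary_key=True))
files_meta = Table("files_meta", metadata, Column("path", String, primary_key=True))


def mirror_insert(conn, clauseelement, _multiparams, _params, _execution_options):
    # Fires before every statement on the engine; act only on INSERTs into "files".
    # The table-name guard also stops the mirrored INSERT from recursing.
    if getattr(clauseelement, "is_insert", False) and clauseelement.table.name == "files":
        compiled = clauseelement.compile()
        conn.execute(insert(files_meta).values(path=compiled.params["path"]))


engine = create_engine("sqlite://")
metadata.create_all(engine)
event.listen(engine, "before_execute", mirror_insert)

with engine.connect() as conn:
    conn.execute(insert(files).values(path="/recordings/1.mp4"))
    conn.commit()
    print(conn.execute(files_meta.select()).all())  # [('/recordings/1.mp4',)]
```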
diff --git a/viseron/components/storage/util.py b/viseron/components/storage/util.py
new file mode 100644
index 000000000..08f71573f
--- /dev/null
+++ b/viseron/components/storage/util.py
@@ -0,0 +1,119 @@
+"""Storage component utility functions."""
+from __future__ import annotations
+
+import os
+import threading
+from datetime import timedelta
+from types import TracebackType
+from typing import TYPE_CHECKING, Any
+
+from viseron.components.storage.const import (
+ CONFIG_DAYS,
+ CONFIG_GB,
+ CONFIG_HOURS,
+ CONFIG_MB,
+ CONFIG_MINUTES,
+ CONFIG_PATH,
+)
+
+if TYPE_CHECKING:
+ from viseron.domains.camera import AbstractCamera, FailedCamera
+
+
+def calculate_age(age: dict[str, Any]) -> timedelta:
+ """Calculate age in seconds."""
+ if not age:
+ return timedelta(seconds=0)
+
+ return timedelta(
+ days=age[CONFIG_DAYS] if age[CONFIG_DAYS] else 0,
+ hours=age[CONFIG_HOURS] if age[CONFIG_HOURS] else 0,
+ minutes=age[CONFIG_MINUTES] if age[CONFIG_MINUTES] else 0,
+ )
+
+
+def calculate_bytes(size: dict[str, Any]) -> int:
+ """Calculate size in bytes."""
+ max_bytes = 0
+ if size[CONFIG_MB]:
+ max_bytes += convert_mb_to_bytes(size[CONFIG_MB])
+ if size[CONFIG_GB]:
+ max_bytes += convert_gb_to_bytes(size[CONFIG_GB])
+ return max_bytes
+
+
+def convert_mb_to_bytes(mb: int) -> int:
+ """Convert mb to bytes."""
+ return mb * 1024 * 1024
+
+
+def convert_gb_to_bytes(gb: int) -> int:
+ """Convert gb to bytes."""
+ return gb * 1024 * 1024 * 1024
+
+
+def get_recorder_path(
+ tier: dict[str, Any], camera: AbstractCamera | FailedCamera, subcategory: str
+) -> str:
+ """Get recorder path for camera."""
+ return os.path.join(tier[CONFIG_PATH], subcategory, camera.identifier)
+
+
+def get_thumbnails_path(
+ tier: dict[str, Any], camera: AbstractCamera | FailedCamera
+) -> str:
+ """Get thumbnails path for camera."""
+ return os.path.join(tier[CONFIG_PATH], "thumbnails", camera.identifier)
+
+
+def get_snapshots_path(
+ tier: dict[str, Any],
+ camera: AbstractCamera | FailedCamera,
+ domain: str,
+) -> str:
+ """Get snapshots path for camera."""
+ return os.path.join(tier[CONFIG_PATH], "snapshots", domain, camera.identifier)
+
+
+def files_to_move_overlap(events_file_ids, continuous_file_ids):
+ """Find the files that are in both events and continuous delete list."""
+ events_dict = {row.file_id: row for row in events_file_ids}
+ continuous_dict = {row.id: row for row in continuous_file_ids}
+ # Find the matching tuples based on "file_id" and "id"
+ matched_ids = [
+ events_dict[file_id] for file_id in events_dict if file_id in continuous_dict
+ ]
+ return matched_ids
+
+
+class RequestedFilesCount:
+ """Context manager for keeping track of recently requested files."""
+
+ def __init__(self) -> None:
+ self.count = 0
+ self.filenames: list[str] = []
+
+ def remove_filename(self, filename: str) -> None:
+ """Remove a filename from the list of active filenames."""
+ self.filenames.remove(filename)
+
+ def __call__(self, filename: str) -> RequestedFilesCount:
+ """Add a filename to the list of active filenames."""
+ self.filenames.append(filename)
+ timer = threading.Timer(2, self.remove_filename, args=(filename,))
+ timer.start()
+ return self
+
+ def __enter__(self):
+ """Increment the counter when entering the context."""
+ self.count += 1
+ return self.count
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ """Decrement the counter when exiting the context."""
+ self.count -= 1
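
A usage sketch for `RequestedFilesCount`, assuming a Viseron dev install so the module path from this diff is importable. Calling the instance tracks a filename for roughly two seconds, while the `with` block counts requests currently in flight:

```python
import time

from viseron.components.storage.util import RequestedFilesCount

tracker = RequestedFilesCount()

with tracker("/segments/cam1/1.m4s") as count:
    print(count)  # 1, one request in flight

print(tracker.filenames)  # ['/segments/cam1/1.m4s']
time.sleep(2.5)
print(tracker.filenames)  # [], the 2 second timer has removed it
```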
diff --git a/viseron/components/webserver/__init__.py b/viseron/components/webserver/__init__.py
index b42eb9ce8..a16a5fe34 100644
--- a/viseron/components/webserver/__init__.py
+++ b/viseron/components/webserver/__init__.py
@@ -15,17 +15,7 @@
from tornado.routing import PathMatches
from viseron.components.webserver.auth import Auth
-from viseron.components.webserver.static_file_handler import (
- AccessTokenStaticFileHandler,
-)
-from viseron.const import (
- DOMAIN_FAILED,
- EVENT_DOMAIN_REGISTERED,
- EVENT_DOMAIN_SETUP_STATUS,
- VISERON_SIGNAL_SHUTDOWN,
-)
-from viseron.domains.camera import AbstractCamera, FailedCamera
-from viseron.domains.camera.const import DOMAIN as CAMERA_DOMAIN
+from viseron.const import DEFAULT_PORT, VISERON_SIGNAL_SHUTDOWN
from viseron.exceptions import ComponentNotReady
from viseron.helpers.storage import Storage
from viseron.helpers.validators import CoerceNoneToDict
@@ -42,7 +32,6 @@
CONFIG_SESSION_EXPIRY,
DEFAULT_COMPONENT,
DEFAULT_DEBUG,
- DEFAULT_PORT,
DEFAULT_SESSION_EXPIRY,
DESC_AUTH,
DESC_COMPONENT,
@@ -77,8 +66,7 @@
)
if TYPE_CHECKING:
- from viseron import Event, Viseron
- from viseron.components import DomainToSetup
+ from viseron import Viseron
LOGGER = logging.getLogger(__name__)
@@ -188,7 +176,9 @@ def cookie_secret(self):
return self._data["cookie_secret"]
-def create_application(vis: Viseron, config, cookie_secret, xsrf_cookies=True):
+def create_application(
+ vis: Viseron, config, cookie_secret, xsrf_cookies=True
+) -> tornado.web.Application:
"""Return tornado web app."""
application = tornado.web.Application(
[
@@ -226,6 +216,7 @@ def create_application(vis: Viseron, config, cookie_secret, xsrf_cookies=True):
static_path=PATH_STATIC,
websocket_ping_interval=10,
debug=config[CONFIG_DEBUG],
+ autoreload=False,
cookie_secret=cookie_secret,
xsrf_cookies=xsrf_cookies,
)
@@ -256,9 +247,10 @@ def __init__(self, vis: Viseron, config) -> None:
self._asyncio_ioloop = asyncio.new_event_loop()
asyncio.set_event_loop(self._asyncio_ioloop)
- self.application = create_application(vis, config, self._store.cookie_secret)
+ self._application = create_application(vis, config, self._store.cookie_secret)
+ self._httpserver = None
try:
- self.application.listen(
+ self._httpserver = self._application.listen(
config[CONFIG_PORT],
xheaders=True,
)
@@ -268,21 +260,16 @@ def __init__(self, vis: Viseron, config) -> None:
raise error
self._ioloop = tornado.ioloop.IOLoop.current()
- self._vis.listen_event(
- EVENT_DOMAIN_REGISTERED.format(domain=CAMERA_DOMAIN), self.camera_registered
- )
- self._vis.listen_event(
- EVENT_DOMAIN_SETUP_STATUS.format(
- status=DOMAIN_FAILED, domain=CAMERA_DOMAIN, identifier="*"
- ),
- self.camera_registered,
- )
-
@property
def auth(self):
"""Return auth."""
return self._auth
+ @property
+ def application(self):
+ """Return application."""
+ return self._application
+
def register_websocket_command(self, handler) -> None:
"""Register a websocket command."""
if handler.command in self._vis.data[WEBSOCKET_COMMANDS]:
@@ -291,44 +278,6 @@ def register_websocket_command(self, handler) -> None:
self._vis.data[WEBSOCKET_COMMANDS][handler.command] = (handler, handler.schema)
- def _serve_camera_recordings(
- self, camera: AbstractCamera | FailedCamera, failed=False
- ) -> None:
- """Serve recordings of each camera in a static file handler."""
- self.application.add_handlers(
- r".*",
- [
- (
- (
- rf"\/recordings\/{camera.identifier}\/"
- rf"(.*\/.*\.(mp4$|mkv$|mov$|jpg$|{camera.extension}$))"
- ),
- AccessTokenStaticFileHandler,
- {
- "path": camera.recorder.recordings_folder,
- "vis": self._vis,
- "camera_identifier": camera.identifier,
- "failed": failed,
- },
- )
- ],
- )
-
- def camera_registered(
- self, event_data: Event[AbstractCamera | DomainToSetup]
- ) -> None:
- """Handle camera registering."""
- camera: AbstractCamera | FailedCamera | None = None
- failed = False
- if isinstance(event_data.data, AbstractCamera):
- camera = event_data.data
- else:
- camera = event_data.data.error_instance
- failed = True
-
- if camera:
- self._serve_camera_recordings(camera, failed)
-
def run(self) -> None:
"""Start ioloop."""
self._ioloop.start()
@@ -355,4 +304,9 @@ def stop(self) -> None:
for task in asyncio.Task.all_tasks():
task.cancel()
+ if self._httpserver:
+ LOGGER.debug("Stopping HTTPServer")
+ self._httpserver.stop()
+
+ LOGGER.debug("Stopping IOloop")
self._ioloop.stop()
diff --git a/viseron/components/webserver/api/__init__.py b/viseron/components/webserver/api/__init__.py
index fc750e7ff..54ea23ad0 100644
--- a/viseron/components/webserver/api/__init__.py
+++ b/viseron/components/webserver/api/__init__.py
@@ -31,10 +31,22 @@ def find_handler(
self, request: HTTPServerRequest, **_kwargs: dict[str, Any]
) -> _HandlerDelegate:
"""Route to correct API handler."""
- api_version = request.path.split("/")[2]
- endpoint = request.path.split("/")[3]
- endpoint_handler = f"{endpoint.title()}APIHandler"
+ try:
+ api_version = request.path.split("/")[2]
+ endpoint = request.path.split("/")[3]
+ except IndexError:
+ LOGGER.warning(
+ f"Invalid API request URL: {request.path}",
+ exc_info=True,
+ )
+ handler = APINotFoundHandler
+ return self._application.get_handler_delegate(
+ request=request,
+ target_class=handler,
+ target_kwargs={"vis": self._vis},
+ )
+ endpoint_handler = f"{endpoint.title()}APIHandler"
try:
handler = getattr(
importlib.import_module(
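
The routing above derives a handler class name from the URL and imports it dynamically. An illustrative condensation of `find_handler` (assumes a Viseron install; the error paths shown in the diff are omitted):

```python
import importlib


def resolve_handler(path: str) -> type:
    # "/api/v1/cameras" -> version "v1", endpoint "cameras" -> CamerasAPIHandler
    _, _, api_version, endpoint = path.split("/")[:4]
    module = importlib.import_module(
        f"viseron.components.webserver.api.{api_version}"
    )
    return getattr(module, f"{endpoint.title()}APIHandler")


print(resolve_handler("/api/v1/cameras"))
```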
diff --git a/viseron/components/webserver/api/handlers.py b/viseron/components/webserver/api/handlers.py
index f39f331b7..164fc0c05 100644
--- a/viseron/components/webserver/api/handlers.py
+++ b/viseron/components/webserver/api/handlers.py
@@ -1,12 +1,13 @@
"""API handlers."""
from __future__ import annotations
+import inspect
import json
import logging
from functools import partial
from http import HTTPStatus
from re import Pattern
-from typing import TYPE_CHECKING, Any, Literal, TypedDict
+from typing import TYPE_CHECKING, Any, Literal, TypedDict, cast
import tornado.routing
import voluptuous as vol
@@ -42,6 +43,7 @@ class Route(TypedDict):
requires_auth: NotRequired[bool]
requires_camera_token: NotRequired[bool]
requires_group: NotRequired[list[Group]]
+ allow_token_parameter: NotRequired[bool]
json_body_schema: NotRequired[Schema]
request_arguments_schema: NotRequired[Schema]
@@ -103,17 +105,15 @@ def handle_method_not_allowed(self) -> None:
HTTPStatus.METHOD_NOT_ALLOWED, f"Method '{self.request.method}' not allowed"
)
- def validate_json_body(self, route: Route) -> bool:
+ def validate_json_body(
+ self, route: Route
+ ) -> tuple[Literal[False], str] | tuple[Literal[True], None]:
"""Validate JSON body."""
if schema := route.get("json_body_schema", None):
try:
json_body = json.loads(self.request.body)
except json.JSONDecodeError:
- self.response_error(
- HTTPStatus.BAD_REQUEST,
- reason=f"Invalid JSON in body: {self.request.body.decode()}",
- )
- return False
+ return False, f"Invalid JSON in body: {self.request.body.decode()}"
try:
self.json_body = schema(json_body)
@@ -122,30 +122,47 @@ def validate_json_body(self, route: Route) -> bool:
f"Invalid body: {self.request.body.decode()}",
exc_info=True,
)
- self.response_error(
- HTTPStatus.BAD_REQUEST,
- reason="Invalid body: {}. {}".format(
+ return (
+ False,
+ "Invalid body: {}. {}".format(
self.request.body.decode(),
humanize_error(json_body, err),
),
)
- return False
- return True
+ return True, None
- def _construct_jwt_from_cookies(self) -> str | None:
- """Construct JWT from cookies."""
+ def _construct_jwt_from_header_and_cookies(self) -> str | None:
+ """Construct JWT from Header and Cookies."""
signature = self.get_secure_cookie("signature_cookie")
if signature is None:
return None
- return self.request.headers.get("Authorization", "") + "." + signature.decode()
+ jwt_header_payload = self.request.headers.get("Authorization", None)
+ if jwt_header_payload is None:
+ return None
+ return jwt_header_payload + "." + signature.decode()
+
+ def _construct_jwt_from_parameter_and_cookies(self) -> str | None:
+ """Construct JWT from Query parameter 'token' and Cookies."""
+ signature = self.get_secure_cookie("signature_cookie")
+ if signature is None:
+ return None
+ jwt_header_payload = self.get_argument("token", None)
+ if jwt_header_payload is None:
+ return None
+ return "Bearer " + jwt_header_payload + "." + signature.decode()
def validate_auth_header(self) -> bool:
"""Validate auth header."""
# Call is coming from browser? Construct the JWT from the cookies
+ auth_header = None
if self.request.headers.get("X-Requested-With", "") == "XMLHttpRequest":
self.browser_request = True
- auth_header = self._construct_jwt_from_cookies()
- else:
+ auth_header = self._construct_jwt_from_header_and_cookies()
+ # Route allows JWT Header + Payload in URL parameter
+ if auth_header is None and self.route.get("allow_token_parameter", False):
+ auth_header = self._construct_jwt_from_parameter_and_cookies()
+ # Header could not be constructed from cookies or URL parameter
+ if auth_header is None:
auth_header = self.request.headers.get("Authorization", None)
if auth_header is None:
@@ -166,7 +183,20 @@ def validate_auth_header(self) -> bool:
auth_val, check_refresh_token=self.browser_request
)
- def route_request(self) -> None:
+ def _allow_token_parameter(self, schema: Schema, route: Route) -> Schema:
+ """Allow token parameter in schema."""
+ if route.get("allow_token_parameter", False):
+ try:
+ schema = schema.extend({vol.Optional("token"): str})
+ except AssertionError:
+ LOGGER.warning(
+ "Schema is not a dict, cannot extend with token parameter "
+ "for route %s",
+ self.request.uri,
+ )
+ return schema
+
+ async def route_request(self) -> None:
"""Route request to correct API endpoint."""
unsupported_method = False
@@ -181,7 +211,7 @@ def route_request(self) -> None:
self.route = route
if self._webserver.auth and route.get("requires_auth", True):
- if not self.validate_auth_header():
+ if not await self.run_in_executor(self.validate_auth_header):
self.response_error(
HTTPStatus.UNAUTHORIZED, reason="Authentication required"
)
@@ -228,6 +258,8 @@ def route_request(self) -> None:
}
if schema := route.get("request_arguments_schema", None):
try:
+ # Implicitly allow token parameter if route allows it
+ schema = self._allow_token_parameter(schema, route)
self.request_arguments = schema(request_arguments)
except vol.Invalid as err:
LOGGER.error(
@@ -257,7 +289,9 @@ def route_request(self) -> None:
)
return
- camera = self._get_camera(camera_identifier)
+ camera = await self.run_in_executor(
+ self._get_camera, camera_identifier
+ )
if not camera:
self.response_error(
HTTPStatus.NOT_FOUND,
@@ -265,14 +299,23 @@ def route_request(self) -> None:
)
return
- if not self.validate_camera_token(camera):
+ if not await self.run_in_executor(
+ self.validate_camera_token, camera
+ ):
self.response_error(
HTTPStatus.UNAUTHORIZED,
reason="Unauthorized",
)
return
- if not self.validate_json_body(route):
+ result, reason = await self.run_in_executor(
+ self.validate_json_body, route
+ )
+ if not result:
+ self.response_error(
+ HTTPStatus.BAD_REQUEST,
+ reason=cast(str, reason),
+ )
return
LOGGER.debug(
@@ -287,8 +330,10 @@ def route_request(self) -> None:
),
)
try:
- getattr(self, route["method"])(*path_args, **path_kwargs)
- return
+ func = getattr(self, route["method"])
+ if inspect.iscoroutinefunction(func):
+ return await func(*path_args, **path_kwargs)
+ return func(*path_args, **path_kwargs)
except Exception as error: # pylint: disable=broad-except
LOGGER.error(
f"Error in API {self.__class__.__name__}."
@@ -308,26 +353,26 @@ def route_request(self) -> None:
LOGGER.warning(f"Endpoint not found for URI: {self.request.uri}")
self.handle_endpoint_not_found()
- def delete(self) -> None:
+ async def delete(self) -> None:
"""Route DELETE requests."""
- self.route_request()
+ await self.route_request()
- def get(self) -> None:
+ async def get(self) -> None:
"""Route GET requests."""
- self.route_request()
+ await self.route_request()
- def post(self) -> None:
+ async def post(self) -> None:
"""Route POST requests."""
- self.route_request()
+ await self.route_request()
- def put(self) -> None:
+ async def put(self) -> None:
"""Route PUT requests."""
- self.route_request()
+ await self.route_request()
class APINotFoundHandler(BaseAPIHandler):
"""Default handler."""
- def get(self) -> None:
+ async def get(self) -> None:
"""Catch all methods."""
self.response_error(HTTPStatus.NOT_FOUND, "Endpoint not found")
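
`route_request` is now a coroutine, and the final dispatch awaits the target method only when it is itself a coroutine function. The dispatch idea in isolation:

```python
import asyncio
import inspect


async def dispatch(method, *args):
    # Await coroutine methods, call plain ones directly, mirroring route_request.
    if inspect.iscoroutinefunction(method):
        return await method(*args)
    return method(*args)


async def main() -> None:
    def sync_get() -> str:
        return "sync"

    async def async_get() -> str:
        return "async"

    print(await dispatch(sync_get))   # sync
    print(await dispatch(async_get))  # async


asyncio.run(main())
```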
diff --git a/viseron/components/webserver/api/v1/__init__.py b/viseron/components/webserver/api/v1/__init__.py
index c2f05d6f1..500fe4700 100644
--- a/viseron/components/webserver/api/v1/__init__.py
+++ b/viseron/components/webserver/api/v1/__init__.py
@@ -4,6 +4,8 @@
from viseron.components.webserver.api.v1.camera import CameraAPIHandler
from viseron.components.webserver.api.v1.cameras import CamerasAPIHandler
from viseron.components.webserver.api.v1.config import ConfigAPIHandler
+from viseron.components.webserver.api.v1.events import EventsAPIHandler
+from viseron.components.webserver.api.v1.hls import HlsAPIHandler
from viseron.components.webserver.api.v1.onboarding import OnboardingAPIHandler
from viseron.components.webserver.api.v1.recordings import RecordingsAPIHandler
@@ -12,6 +14,8 @@
"CameraAPIHandler",
"CamerasAPIHandler",
"ConfigAPIHandler",
+ "EventsAPIHandler",
+ "HlsAPIHandler",
"OnboardingAPIHandler",
"RecordingsAPIHandler",
)
diff --git a/viseron/components/webserver/api/v1/auth.py b/viseron/components/webserver/api/v1/auth.py
index 2923ced8a..47c800a64 100644
--- a/viseron/components/webserver/api/v1/auth.py
+++ b/viseron/components/webserver/api/v1/auth.py
@@ -80,20 +80,23 @@ class AuthAPIHandler(BaseAPIHandler):
},
]
- def auth_enabled(self) -> None:
+ async def auth_enabled(self) -> None:
"""Return if auth is enabled."""
response = {
"enabled": bool(self._webserver.auth) if self._webserver.auth else False,
- "onboarding_complete": self._webserver.auth.onboarding_complete
+ "onboarding_complete": await self.run_in_executor(
+ self._webserver.auth.onboarding_complete
+ )
if self._webserver.auth
else False,
}
self.response_success(response=response)
- def auth_create(self) -> None:
+ async def auth_create(self) -> None:
"""Create a new user."""
try:
- self._webserver.auth.add_user(
+ await self.run_in_executor(
+ self._webserver.auth.add_user,
self.json_body["name"].strip(),
self.json_body["username"].strip().casefold(),
self.json_body["password"],
@@ -104,12 +107,12 @@ def auth_create(self) -> None:
return
self.response_success()
- def auth_user(self, user_id: str) -> None:
+ async def auth_user(self, user_id: str) -> None:
"""Get a user.
Returns 200 OK with user data if user exists.
"""
- user = self._webserver.auth.get_user(user_id)
+ user = await self.run_in_executor(self._webserver.auth.get_user, user_id)
if user is None:
self.response_error(HTTPStatus.NOT_FOUND, reason="User not found")
return
@@ -121,11 +124,13 @@ def auth_user(self, user_id: str) -> None:
}
)
- def auth_login(self) -> None:
+ async def auth_login(self) -> None:
"""Login."""
try:
- user = self._webserver.auth.validate_user(
- self.json_body["username"], self.json_body["password"]
+ user = await self.run_in_executor(
+ self._webserver.auth.validate_user,
+ self.json_body["username"],
+ self.json_body["password"],
)
except AuthenticationFailed:
self.response_error(
@@ -133,13 +138,16 @@ def auth_login(self) -> None:
)
return
- refresh_token = self._webserver.auth.generate_refresh_token(
+ refresh_token = await self.run_in_executor(
+ self._webserver.auth.generate_refresh_token,
user.id,
self.json_body["client_id"],
"normal",
)
- access_token = self._webserver.auth.generate_access_token(
- refresh_token, self.request.remote_ip
+ access_token = await self.run_in_executor(
+ self._webserver.auth.generate_access_token,
+ refresh_token,
+ self.request.remote_ip,
)
self.set_cookies(refresh_token, access_token, user, new_session=True)
@@ -151,15 +159,18 @@ def auth_login(self) -> None:
),
)
- def auth_logout(self) -> None:
+ async def auth_logout(self) -> None:
"""Logout."""
refresh_token_cookie = self.get_secure_cookie("refresh_token")
if refresh_token_cookie is not None:
- refresh_token = self._webserver.auth.get_refresh_token_from_token(
- refresh_token_cookie.decode()
+ refresh_token = await self.run_in_executor(
+ self._webserver.auth.get_refresh_token_from_token,
+ refresh_token_cookie.decode(),
)
if refresh_token is not None:
- self._webserver.auth.delete_refresh_token(refresh_token)
+ await self.run_in_executor(
+ self._webserver.auth.delete_refresh_token, refresh_token
+ )
self.clear_all_cookies()
self.response_success()
@@ -202,10 +213,10 @@ def _handle_refresh_token(
),
)
- def auth_token(self) -> None:
+ async def auth_token(self) -> None:
"""Handle token request."""
if self.json_body["grant_type"] == "refresh_token":
- status, response = self._handle_refresh_token()
+ status, response = await self.run_in_executor(self._handle_refresh_token)
if status == HTTPStatus.OK:
self.response_success(response=response)
return
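
Every auth method above now pushes its blocking database and crypto work through `run_in_executor` so the event loop stays responsive. A self-contained sketch of the underlying Tornado pattern, with `check_credentials` as a hypothetical stand-in for the real password check:

```python
import time

import tornado.ioloop


def check_credentials() -> bool:
    """Stand-in for a blocking bcrypt verification."""
    time.sleep(0.1)
    return True


async def login() -> None:
    # Runs the blocking call in a thread pool and awaits the result.
    ok = await tornado.ioloop.IOLoop.current().run_in_executor(None, check_credentials)
    print("authenticated:", ok)


tornado.ioloop.IOLoop.current().run_sync(login)
```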
diff --git a/viseron/components/webserver/api/v1/camera.py b/viseron/components/webserver/api/v1/camera.py
index 7ce912e89..b5a9e0779 100644
--- a/viseron/components/webserver/api/v1/camera.py
+++ b/viseron/components/webserver/api/v1/camera.py
@@ -123,9 +123,9 @@ def _snapshot_from_memory(self, camera: AbstractCamera) -> bytes | None:
return jpg
return None
- def get_snapshot(self, camera_identifier: str) -> None:
+ async def get_snapshot(self, camera_identifier: str) -> None:
"""Return camera snapshot."""
- camera = self._get_camera(camera_identifier)
+ camera = await self.run_in_executor(self._get_camera, camera_identifier)
if not camera:
self.response_error(
@@ -136,9 +136,9 @@ def get_snapshot(self, camera_identifier: str) -> None:
jpg = None
if camera.still_image[CONFIG_URL]:
- jpg = self._snapshot_from_url(camera)
+ jpg = await self.run_in_executor(self._snapshot_from_url, camera)
else:
- jpg = self._snapshot_from_memory(camera)
+ jpg = await self.run_in_executor(self._snapshot_from_memory, camera)
if jpg is None:
self.response_error(
@@ -150,7 +150,7 @@ def get_snapshot(self, camera_identifier: str) -> None:
self.response_success(response=jpg, headers={"Content-Type": "image/jpeg"})
return
- def get_camera(self, camera_identifier: str) -> None:
+ async def get_camera(self, camera_identifier: str) -> None:
"""Return camera."""
camera = self._get_camera(
camera_identifier, failed=self.request_arguments["failed"]
diff --git a/viseron/components/webserver/api/v1/cameras.py b/viseron/components/webserver/api/v1/cameras.py
index 99665f6ba..5d87d248e 100644
--- a/viseron/components/webserver/api/v1/cameras.py
+++ b/viseron/components/webserver/api/v1/cameras.py
@@ -26,13 +26,13 @@ class CamerasAPIHandler(BaseAPIHandler):
},
]
- def get_cameras(self) -> None:
+ async def get_cameras(self) -> None:
"""Return cameras."""
self.response_success(
response=self._vis.data[REGISTERED_DOMAINS].get(CAMERA_DOMAIN, {})
)
- def get_failed_cameras(self) -> None:
+ async def get_failed_cameras(self) -> None:
"""Return failed cameras."""
failed_cameras = {}
for failed_camera in (
diff --git a/viseron/components/webserver/api/v1/config.py b/viseron/components/webserver/api/v1/config.py
index f284dd381..647359f3e 100644
--- a/viseron/components/webserver/api/v1/config.py
+++ b/viseron/components/webserver/api/v1/config.py
@@ -18,9 +18,12 @@ class ConfigAPIHandler(BaseAPIHandler):
},
]
- def get_config(self) -> None:
+ async def get_config(self) -> None:
"""Return Viseron config."""
- with open(CONFIG_PATH, encoding="utf-8") as config_file:
- config = config_file.read()
+ def read_config() -> str:
+ with open(CONFIG_PATH, encoding="utf-8") as config_file:
+ return config_file.read()
+
+ config = await self.run_in_executor(read_config)
self.response_success(response=config)
diff --git a/viseron/components/webserver/api/v1/events.py b/viseron/components/webserver/api/v1/events.py
new file mode 100644
index 000000000..74e07d0a6
--- /dev/null
+++ b/viseron/components/webserver/api/v1/events.py
@@ -0,0 +1,186 @@
+"""API handler for Events."""
+from __future__ import annotations
+
+import datetime
+import logging
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Callable
+
+import voluptuous as vol
+from sqlalchemy import select
+
+from viseron.components.storage.models import Motion, Objects, Recordings
+from viseron.components.webserver.api.handlers import BaseAPIHandler
+from viseron.domains.camera import FailedCamera
+
+if TYPE_CHECKING:
+ from sqlalchemy.orm import Session
+
+ from viseron.domains.camera import AbstractCamera
+
+LOGGER = logging.getLogger(__name__)
+
+
+class EventsAPIHandler(BaseAPIHandler):
+ """API handler for Events."""
+
+ routes = [
+ {
+ "path_pattern": (r"/events/(?Ptrue
, the thumbnail that is created on start of recording is "
- "saved to {folder}/{camera_identifier}/latest_thumbnail.jpg
"
+ "saved to {camera_identifier}/latest_thumbnail.jpg
23:59:59.jpg
."
)
DESC_EXTENSION = "The file extension used for recordings."
+DEPRECATED_EXTENSION = "mp4
is the only supported extension."
+WARNING_EXTENSION = (
+ "Config option 'extension' is deprecated and will be removed in a "
+ "future version. 'mp4' is the only supported extension."
+)
+
DESC_THUMBNAIL = "Options for the thumbnail created on start of a recording."
DESC_FILENAME_PATTERN_THUMBNAIL = (
 "A strftime pattern for saved thumbnails."
 [...]
 "<code>23:59:59.jpg</code>."
)
+DEPRECATED_FILENAME_PATTERN_THUMBNAIL = (
+ "Thumbnails are stored with the same filename as the recording ID in the "
+ "database, for example: 1.jpg, 2.jpg, 3.jpg etc."
+)
+WARNING_FILENAME_PATTERN_THUMBNAIL = (
+ "Config option 'filename_pattern' is deprecated and will be removed in a future "
+ "version. {DEPRECATED_FILENAME_PATTERN_THUMBNAIL}"
+)
+DESC_STORAGE = (
+ "Storage options for the camera."
[... diff truncated: end of the camera const.py section; the following lines appear to come from the object detector const.py ...]
+ "If <code>true</code>, objects matching this filter will be stored "
+ "in the database, as well as having a snapshot saved. "
+ "Labels with <code>trigger_recorder</code> set to <code>true</code> will always "
+ "be stored when a recording starts, regardless of this setting."
+)
+DESC_LABEL_STORE_INTERVAL = (
+ "The interval at which the label should be stored in the database, in seconds. "
+ "If set to 0, the label will be stored every time it is detected."
+)
# CAMERA_SCHEMA constants
CONFIG_CAMERAS = "cameras"
diff --git a/viseron/domains/object_detector/detected_object.py b/viseron/domains/object_detector/detected_object.py
index 99ff12c6e..aab1950f6 100644
--- a/viseron/domains/object_detector/detected_object.py
+++ b/viseron/domains/object_detector/detected_object.py
@@ -5,6 +5,7 @@
from typing import Any
from viseron.domains.camera.shared_frames import SharedFrame
+from viseron.events import EventData
from viseron.helpers import (
calculate_absolute_coords,
calculate_relative_coords,
@@ -66,6 +67,7 @@ def __init__(
self._rel_width = float(round(self._rel_x2 - self._rel_x1, 3))
self._rel_height = float(round(self._rel_y2 - self._rel_y1, 3))
self._trigger_recorder = False
+ self._store = False
self._relevant = False
self._filter_hit = None
@@ -132,6 +134,15 @@ def trigger_recorder(self):
def trigger_recorder(self, value) -> None:
self._trigger_recorder = value
+ @property
+ def store(self):
+ """Return if object should be stored in database."""
+ return self._store
+
+ @store.setter
+ def store(self, value) -> None:
+ self._store = value
+
@property
def relevant(self):
"""Return if object is relevant.
@@ -172,10 +183,18 @@ def zero_if_negative(value):
@dataclass
-class EventDetectedObjectsData:
+class EventDetectedObjectsData(EventData):
"""Event with information on objects in field of view or zone."""
camera_identifier: str
shared_frame: SharedFrame | None
objects: list[DetectedObject]
zone: Any = None
+
+ def as_dict(self) -> dict[str, Any]:
+ """Convert to dict."""
+ return {
+ "camera_identifier": self.camera_identifier,
+ "objects": [obj.as_dict() for obj in self.objects],
+ "zone": self.zone,
+ }
diff --git a/viseron/domains/object_detector/zone.py b/viseron/domains/object_detector/zone.py
index dc1ec003a..042930c39 100644
--- a/viseron/domains/object_detector/zone.py
+++ b/viseron/domains/object_detector/zone.py
@@ -2,7 +2,7 @@
from __future__ import annotations
import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
from viseron.domains.camera.const import DOMAIN as CAMERA_DOMAIN
from viseron.domains.object_detector.const import CONFIG_LABEL_LABEL
@@ -44,6 +44,7 @@ def __init__(
) -> None:
self._vis = vis
self._camera = vis.get_registered_domain(CAMERA_DOMAIN, camera_identifier)
+ self._zone_config = zone_config
self._logger = logging.getLogger(__name__ + "." + camera_identifier)
self._coordinates = generate_numpy_from_coordinates(
@@ -135,3 +136,11 @@ def objects_in_zone_setter(
def name(self) -> str:
"""Return name of zone."""
return self._name
+
+ def as_dict(self) -> dict[str, Any]:
+ """Return zone as dict."""
+ return {
+ "coordinates": self._zone_config,
+ "name": self._name,
+ "camera_identifier": self._camera.identifier,
+ }
diff --git a/viseron/events.py b/viseron/events.py
new file mode 100644
index 000000000..ef7d00148
--- /dev/null
+++ b/viseron/events.py
@@ -0,0 +1,42 @@
+"""This module contains classes related to events."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Generic
+
+from typing_extensions import TypeVar
+
+T = TypeVar("T")
+
+
+@dataclass
+class Event(Generic[T]):
+ """Dataclass that holds an event."""
+
+ name: str
+ data: T
+ timestamp: float
+
+ def as_dict(self) -> dict[str, Any]:
+ """Convert Event to dict."""
+ return {
+ "name": self.name.split("/", 1)[1],
+ "data": self.data,
+ "timestamp": self.timestamp,
+ }
+
+
+class EventData:
+ """Base class that holds event data."""
+
+ # Indicates if the event is a JSON serializable object
+ json_serializable: bool = True
+
+
+class EventEmptyData(EventData):
+ """Empty event data."""
+
+ def as_dict(self) -> dict[str, Any]:
+ """Convert EventEmptyData to dict."""
+ return {}
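
A usage sketch for the generic `Event` container, assuming a Viseron install; the event name below is illustrative, and `as_dict()` simply drops everything up to the first `/`:

```python
from viseron.events import Event

event = Event(name="camera_1/recorder/start", data={"recording_id": 1}, timestamp=1.0)
print(event.as_dict())
# {'name': 'recorder/start', 'data': {'recording_id': 1}, 'timestamp': 1.0}
```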
diff --git a/viseron/helpers/__init__.py b/viseron/helpers/__init__.py
index 866a07a4a..a0def61a2 100644
--- a/viseron/helpers/__init__.py
+++ b/viseron/helpers/__init__.py
@@ -1,9 +1,11 @@
"""General helper functions."""
from __future__ import annotations
+import datetime
import linecache
import logging
import math
+import multiprocessing as mp
import os
import tracemalloc
from queue import Full, Queue
@@ -22,7 +24,14 @@
LOGGER = logging.getLogger(__name__)
-def calculate_relative_contours(contours, resolution: tuple[int, int]):
+def utcnow() -> datetime.datetime:
+ """Return current UTC time."""
+ return datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
+
+
+def calculate_relative_contours(
+ contours, resolution: tuple[int, int]
+) -> list[np.ndarray]:
"""Convert contours with absolute coords to relative."""
relative_contours = []
for contour in contours:
@@ -109,7 +118,7 @@ def put_object_label_relative(frame, obj, frame_res, color=(255, 0, 0)) -> None:
thickness=FONT_THICKNESS,
)[0]
- filter_text = None
+ filter_text = ""
if obj.filter_hit:
filter_text = f"Filter: {obj.filter_hit}"
(filter_text_width, filter_text_height) = cv2.getTextSize(
@@ -275,7 +284,7 @@ def draw_object_mask(frame, mask_points) -> None:
def pop_if_full(
- queue: Queue,
+ queue: Queue | mp.Queue | tq.Queue,
item: Any,
logger: logging.Logger = LOGGER,
name: str = "unknown",
@@ -296,7 +305,7 @@ def slugify(text: str) -> str:
return unicode_slug.slugify(text, separator="_")
-def create_directory(path) -> None:
+def create_directory(path: str) -> None:
"""Create a directory."""
try:
if not os.path.isdir(path):
@@ -434,6 +443,26 @@ def convert_letterboxed_bbox(
)
+def zoom_boundingbox(
+ frame: np.ndarray,
+ bounding_box: tuple[int, int, int, int],
+ min_size=300,
+ crop_correction_factor=1,
+) -> np.ndarray:
+ """Zoom in on a bounding box in an image."""
+ x1, y1, x2, y2 = bounding_box
+ size = max(int(max(x2 - x1, y2 - y1) * crop_correction_factor), min_size)
+
+ x_offset = max(
+ 0, min(int((x2 - x1) / 2.0 + x1 - size / 2.0), frame.shape[1] - size)
+ )
+ y_offset = max(
+ 0, min(int((y2 - y1) / 2.0 + y1 - size / 2.0), frame.shape[0] - size)
+ )
+
+ return frame.copy()[y_offset : y_offset + size, x_offset : x_offset + size]
+
+
def memory_usage_profiler(logger, key_type="lineno", limit=5) -> None:
"""Print a table with the lines that are using the most memory."""
snapshot = tracemalloc.take_snapshot()
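
A usage sketch for `zoom_boundingbox`, assuming a Viseron install: the crop is a square, padded up to `min_size` and clamped so it never leaves the frame:

```python
import numpy as np

from viseron.helpers import zoom_boundingbox

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
# A 100x120 detection box is padded to the 300-pixel minimum crop size.
crop = zoom_boundingbox(frame, (600, 300, 700, 420), min_size=300)
print(crop.shape)  # (300, 300, 3)
```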
diff --git a/viseron/helpers/filter.py b/viseron/helpers/filter.py
index 27ef08ada..9f6329f92 100644
--- a/viseron/helpers/filter.py
+++ b/viseron/helpers/filter.py
@@ -1,16 +1,20 @@
"""Used to filter out unwanted objects."""
+from datetime import timedelta
+
from viseron.domains.object_detector.const import (
CONFIG_LABEL_CONFIDENCE,
CONFIG_LABEL_HEIGHT_MAX,
CONFIG_LABEL_HEIGHT_MIN,
CONFIG_LABEL_LABEL,
CONFIG_LABEL_REQUIRE_MOTION,
+ CONFIG_LABEL_STORE,
+ CONFIG_LABEL_STORE_INTERVAL,
CONFIG_LABEL_TRIGGER_RECORDER,
CONFIG_LABEL_WIDTH_MAX,
CONFIG_LABEL_WIDTH_MIN,
)
from viseron.domains.object_detector.detected_object import DetectedObject
-from viseron.helpers import object_in_polygon
+from viseron.helpers import object_in_polygon, utcnow
class Filter:
@@ -26,8 +30,14 @@ def __init__(self, camera_resolution, object_filter, mask) -> None:
self._height_min = object_filter[CONFIG_LABEL_HEIGHT_MIN]
self._height_max = object_filter[CONFIG_LABEL_HEIGHT_MAX]
self._trigger_recorder = object_filter[CONFIG_LABEL_TRIGGER_RECORDER]
+ self._store = object_filter[CONFIG_LABEL_STORE]
+ self._store_interval = timedelta(
+ seconds=object_filter[CONFIG_LABEL_STORE_INTERVAL]
+ )
self._require_motion = object_filter[CONFIG_LABEL_REQUIRE_MOTION]
+ self._last_stored = utcnow() - self._store_interval
+
def filter_confidence(self, obj: DetectedObject) -> bool:
"""Return if confidence filter is met."""
if obj.confidence > self._confidence:
@@ -66,6 +76,17 @@ def filter_object(self, obj: DetectedObject) -> bool:
and self.filter_mask(obj)
)
+ def should_store(self, obj: DetectedObject) -> bool:
+ """Return True if object should be stored."""
+ # Only store if store interval has passed
+ if self._store and utcnow() - self._last_stored > self._store_interval:
+ obj.store = True
+ self._last_stored = utcnow()
+ return True
+
+ obj.store = False
+ return False
+
@property
def confidence(self) -> bool:
"""Return configured confidence of filter."""
@@ -76,6 +97,11 @@ def trigger_recorder(self) -> bool:
"""Return if label triggers recorder."""
return self._trigger_recorder
+ @property
+ def store(self) -> bool:
+ """Return if label should be stored in database."""
+ return self._store
+
@property
def require_motion(self) -> bool:
"""Return if label requires motion to trigger recorder."""
diff --git a/viseron/helpers/fixed_size_dict.py b/viseron/helpers/fixed_size_dict.py
new file mode 100644
index 000000000..e4c4b9019
--- /dev/null
+++ b/viseron/helpers/fixed_size_dict.py
@@ -0,0 +1,42 @@
+"""A dictionary with a fixed size.
+
+If the dictionary exceeds the maximum size, the oldest item is removed.
+"""
+from __future__ import annotations
+
+import typing
+from collections import OrderedDict
+
+_KT = typing.TypeVar("_KT")
+_VT = typing.TypeVar("_VT")
+
+
+class FixedSizeDict(OrderedDict, typing.MutableMapping[_KT, _VT]):
+ """A dictionary with a fixed size.
+
+ If the dictionary exceeds the maximum size, the oldest item is removed.
+ Each time an item is accessed it is moved to the end of the dictionary.
+ """
+
+ def __init__(self, *args, maxlen=0, **kwargs):
+ self._maxlen = maxlen
+ super().__init__(*args, **kwargs)
+
+ def __setitem__(self, key, value) -> None:
+ """Set an item in the dictionary.
+
+ Deleting the oldest item if the dictionary exceeds the maximum size.
+ """
+ super().__setitem__(key, value)
+ if self._maxlen > 0:
+ if len(self) > self._maxlen:
+ self.pop(next(iter(self)))
+
+ def get(self, key: _KT, *arg) -> _VT | None:
+ """Get an item from the dictionary.
+
+ Move the item to the end of the dictionary so that it is not removed.
+ """
+ if key in self:
+ self.move_to_end(key)
+ return super().get(key, *arg)
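
A usage sketch for `FixedSizeDict`, assuming a Viseron install; `get()` refreshes an entry, so eviction always removes the least recently used key:

```python
from viseron.helpers.fixed_size_dict import FixedSizeDict

cache: FixedSizeDict[str, int] = FixedSizeDict(maxlen=2)
cache["a"] = 1
cache["b"] = 2
cache.get("a")      # touching "a" moves it to the end
cache["c"] = 3      # evicts "b", now the oldest entry
print(list(cache))  # ['a', 'c']
```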
diff --git a/viseron/helpers/json.py b/viseron/helpers/json.py
index 589698b54..e79d84da5 100644
--- a/viseron/helpers/json.py
+++ b/viseron/helpers/json.py
@@ -5,6 +5,8 @@
from enum import Enum
from typing import Any
+import numpy as np
+
class JSONEncoder(json.JSONEncoder):
"""Helper to convert objects to JSON."""
@@ -12,7 +14,7 @@ class JSONEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
"""Convert objects."""
if isinstance(o, datetime.datetime):
- return o.isoformat()
+ return o.replace(tzinfo=datetime.timezone.utc).isoformat()
if hasattr(o, "as_dict"):
return o.as_dict()
if dataclasses.is_dataclass(o):
@@ -21,5 +23,7 @@ def default(self, o: Any) -> Any:
return int(o.total_seconds())
if isinstance(o, Enum):
return o.value
+ if isinstance(o, np.ndarray):
+ return o.tolist()
return json.JSONEncoder.default(self, o)
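
A sketch of the new encoder branches, assuming a Viseron install: naive datetimes are serialized as UTC, and numpy arrays become nested lists:

```python
import datetime
import json

import numpy as np

from viseron.helpers.json import JSONEncoder

payload = {
    "when": datetime.datetime(2024, 1, 1, 12, 0, 0),  # naive, treated as UTC
    "mask": np.array([[0, 1], [1, 0]]),
}
print(json.dumps(payload, cls=JSONEncoder))
# {"when": "2024-01-01T12:00:00+00:00", "mask": [[0, 1], [1, 0]]}
```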
diff --git a/viseron/helpers/logs.py b/viseron/helpers/logs.py
index 85b55049a..3763d2848 100644
--- a/viseron/helpers/logs.py
+++ b/viseron/helpers/logs.py
@@ -1,9 +1,14 @@
"""Logging helpers Viseron."""
+from __future__ import annotations
+
+import io
import logging
import os
import re
import threading
-from typing import Any, List, Literal
+import typing
+from types import TracebackType
+from typing import Any, AnyStr, Iterable, Iterator, Literal, TextIO
from colorlog import ColoredFormatter
@@ -62,18 +67,21 @@ def filter(self, record: logging.LogRecord) -> bool:
class UnhelpfullLogFilter(logging.Filter):
"""Filter out unimportant logs."""
- def __init__(self, errors_to_ignore: List[Any], *args, **kwargs) -> None:
+ def __init__(self, errors_to_ignore: list[Any], *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.errors_to_ignore = errors_to_ignore
def filter(self, record) -> bool:
"""Filter log record."""
- if any(error in record.msg for error in self.errors_to_ignore):
- return False
if isinstance(record.msg, str) and (
- record.msg == "" or record.msg.isspace() or not record.msg.strip()
+ record.msg == ""
+ or record.msg.isspace()
+ or not record.msg.strip()
+ or record.msg == "\n"
):
return False
+ if any(error in record.msg for error in self.errors_to_ignore):
+ return False
return True
@@ -142,7 +150,9 @@ def fileno(self):
def run(self) -> None:
"""Run the thread, logging everything."""
for line in iter(self.pipe_reader.readline, ""):
- self._logger.log(self._output_level, line.strip().strip("\n"))
+ log_str = line.strip().strip("\n")
+ if log_str:
+ self._logger.log(self._output_level, log_str)
self.pipe_reader.close()
@@ -190,3 +200,95 @@ def close(self) -> None:
"""Close the write end of the pipe."""
os.close(self._write_filedescriptor)
os.dup2(self._old_fd, self._fd)
+
+
+class StreamToLogger(typing.TextIO):
+ """Stream object that redirects its output to standard logging."""
+
+ def __init__(self, logger: logging.Logger, log_level: int) -> None:
+ """Initialize the object."""
+ self.logger = logger
+ self.log_level = log_level
+
+ def __enter__(self) -> TextIO:
+ """Enter context manager."""
+ raise io.UnsupportedOperation
+
+ def close(self) -> None:
+ """Close the stream."""
+ raise io.UnsupportedOperation
+
+ def fileno(self) -> int:
+ """Return the file descriptor."""
+ raise io.UnsupportedOperation
+
+ def flush(self) -> None:
+ """Flush the stream."""
+ raise io.UnsupportedOperation
+
+ def isatty(self) -> bool:
+ """Return if the stream is a tty."""
+ raise io.UnsupportedOperation
+
+ def read(self, n: int = -1) -> AnyStr:
+ """Read from the stream."""
+ raise io.UnsupportedOperation
+
+ def readable(self) -> bool:
+ """Return if the stream is readable."""
+ raise io.UnsupportedOperation
+
+ def readline(self, limit: int = -1) -> AnyStr:
+ """Read a line from the stream."""
+ raise io.UnsupportedOperation
+
+ def readlines(self, hint: int = -1) -> list[AnyStr]:
+ """Read lines from the stream."""
+ raise io.UnsupportedOperation
+
+ def seek(self, offset: int, whence: int = 0) -> int:
+ """Seek in the stream."""
+ raise io.UnsupportedOperation
+
+ def seekable(self) -> bool:
+ """Return if the stream is seekable."""
+ raise io.UnsupportedOperation
+
+ def tell(self) -> int:
+ """Return the current position in the stream."""
+ raise io.UnsupportedOperation
+
+ def truncate(self, size: int | None = None) -> int:
+ """Truncate the stream."""
+ raise io.UnsupportedOperation
+
+ def writable(self) -> bool:
+ """Return if the stream is writable."""
+ raise io.UnsupportedOperation
+
+ def writelines(self, lines: Iterable[AnyStr]) -> None:
+ """Write lines to the stream."""
+ raise io.UnsupportedOperation
+
+ def __next__(self) -> AnyStr:
+ """Return the next line from the stream."""
+ raise io.UnsupportedOperation
+
+ def __iter__(self) -> Iterator[AnyStr]:
+ """Return an iterator over the stream."""
+ raise io.UnsupportedOperation
+
+ def __exit__(
+ self,
+ t: type[BaseException] | None,
+ value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ """Exit context manager."""
+ raise io.UnsupportedOperation
+
+ def write(self, text: str): # pylint: disable=arguments-renamed
+ """Write to the logger."""
+ if text == "\n":
+ return
+ self.logger.log(self.log_level, text.rstrip())
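
A usage sketch for `StreamToLogger`, assuming a Viseron install: only `write()` is supported, which is exactly enough to capture redirected stdout:

```python
import contextlib
import logging

from viseron.helpers.logs import StreamToLogger

logging.basicConfig(level=logging.INFO)
stream = StreamToLogger(logging.getLogger("stdout"), logging.INFO)

# Every other stream method raises io.UnsupportedOperation by design.
with contextlib.redirect_stdout(stream):
    print("hello")  # emitted through the logger instead of the terminal
```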
diff --git a/viseron/helpers/validators.py b/viseron/helpers/validators.py
index cd85f1606..949fc7230 100644
--- a/viseron/helpers/validators.py
+++ b/viseron/helpers/validators.py
@@ -10,7 +10,20 @@
def deprecated(key: str, replacement: Optional[str] = None) -> Callable[[dict], dict]:
- """Mark key as deprecated and optionally replace it."""
+ """Mark key as deprecated and optionally replace it.
+
+ Usage example:
+ CONFIG_SCHEMA = vol.Schema(
+ vol.All(
+ {
+ vol.Optional(
+ "this_key_is_deprecated"
+ ): str,
+ },
+ deprecated("this_key_is_deprecated", "this_key_is_replacement")
+ )
+ )
+ """
def validator(config):
"""Warn if key is present. Replace it if a value is given."""
@@ -37,6 +50,66 @@ def validator(config):
return validator
+class Deprecated(vol.Optional):
+ """Mark key as deprecated.
+
+ message: Displayed in the generated documentation.
+ warning: Displayed in the logs.
+ """
+
+ def __init__(
+ self,
+ schema: Any,
+ raise_error=False,
+ message=None,
+ description=None,
+ warning=None,
+ ) -> None:
+ self._key = schema
+ self._raise_error = raise_error
+ self._message = message
+ self._warning = warning
+
+ super().__init__(
+ schema,
+ default=vol.UNDEFINED,
+ description=description,
+ )
+
+ @property
+ def key(self) -> str:
+ """Return deprecated key."""
+ return self._key
+
+ @property
+ def message(self) -> str:
+ """Return deprecation message."""
+ return (
+ f"Config option '{self.key}' is deprecated "
+ "and will be removed in a future version."
+ if not self._message
+ else self._message
+ )
+
+ @property
+ def warning(self) -> str:
+ """Return deprecation warning."""
+ return (
+ f"Config option '{self.key}' is deprecated "
+ "and will be removed in a future version. "
+ "Please remove it from your configuration."
+ if not self._warning
+ else self._warning
+ )
+
+ def __call__(self, v):
+ """Warn user about deprecated key."""
+ if self._raise_error:
+ raise vol.Invalid(self.warning)
+ LOGGER.warning(self.warning)
+ return super().__call__(v)
+
+
def slug(value: Any) -> str:
"""Validate value is a valid slug."""
if value is None:
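
A usage sketch for the `Deprecated` marker, assuming a Viseron install; the schema below is illustrative, mirroring how the camera schema deprecates the `extension` option above:

```python
import voluptuous as vol

from viseron.helpers.validators import Deprecated

SCHEMA = vol.Schema(
    {
        Deprecated("extension", warning="'mp4' is the only supported extension."): str,
        vol.Optional("lookback", default=5): int,
    }
)

# Validates fine, but logs the deprecation warning for the old key.
print(SCHEMA({"extension": "mkv"}))  # {'extension': 'mkv', 'lookback': 5}
```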
diff --git a/viseron/states.py b/viseron/states.py
index a4a258ee7..edf47a596 100644
--- a/viseron/states.py
+++ b/viseron/states.py
@@ -8,6 +8,7 @@
from typing import TYPE_CHECKING, Any
from viseron.const import EVENT_ENTITY_ADDED, EVENT_STATE_CHANGED
+from viseron.events import EventData
from viseron.helpers import slugify
if TYPE_CHECKING:
@@ -19,7 +20,7 @@
@dataclass
-class EventStateChangedData:
+class EventStateChangedData(EventData):
"""State changed event data."""
entity_id: str
@@ -40,7 +41,7 @@ def as_dict(self):
@dataclass
-class EventEntityAddedData:
+class EventEntityAddedData(EventData):
"""Entity event data."""
entity: Entity
@@ -149,8 +150,7 @@ def add_entity(self, component: Component, entity: Entity):
entity.setup()
self._vis.dispatch_event(
- EVENT_ENTITY_ADDED,
- EventEntityAddedData(entity),
+ EVENT_ENTITY_ADDED, EventEntityAddedData(entity), store=False
)
self.set_state(entity)
return entity
diff --git a/viseron/watchdog/process_watchdog.py b/viseron/watchdog/process_watchdog.py
index da8dec5e5..b8342b2a2 100644
--- a/viseron/watchdog/process_watchdog.py
+++ b/viseron/watchdog/process_watchdog.py
@@ -1,10 +1,10 @@
"""Watchdog for long-running processes."""
from __future__ import annotations
-import datetime
import logging
import multiprocessing as mp
+from viseron.helpers import utcnow
from viseron.watchdog import WatchDog
LOGGER = logging.getLogger(__name__)
@@ -73,7 +73,7 @@ def start(self) -> None:
*self._args,
**self._kwargs,
)
- self._start_time = datetime.datetime.now().timestamp()
+ self._start_time = utcnow().timestamp()
self._started = True
self._process.start()
if self._register:
@@ -131,7 +131,7 @@ def watchdog(self) -> None:
if registered_process.is_alive():
continue
- now = datetime.datetime.now().timestamp()
+ now = utcnow().timestamp()
if (
registered_process.start_time
and now - registered_process.start_time
diff --git a/viseron/watchdog/subprocess_watchdog.py b/viseron/watchdog/subprocess_watchdog.py
index 8c5cbb8db..dc8eabb88 100644
--- a/viseron/watchdog/subprocess_watchdog.py
+++ b/viseron/watchdog/subprocess_watchdog.py
@@ -1,10 +1,10 @@
"""Watchdog for long-running threads."""
from __future__ import annotations
-import datetime
import logging
import subprocess as sp
+from viseron.helpers import utcnow
from viseron.watchdog import WatchDog
LOGGER = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ def start(self) -> None:
*self._args,
**self._kwargs,
)
- self._start_time = datetime.datetime.now().timestamp()
+ self._start_time = utcnow().timestamp()
self._started = True
def restart(self) -> None:
@@ -107,7 +107,7 @@ def watchdog(self) -> None:
continue
if registered_process.subprocess.poll() is None:
continue
- now = datetime.datetime.now().timestamp()
+ now = utcnow().timestamp()
if now - registered_process.start_time < registered_process.grace_period:
continue
diff --git a/viseron/watchdog/thread_watchdog.py b/viseron/watchdog/thread_watchdog.py
index edc4b975e..bba78319a 100644
--- a/viseron/watchdog/thread_watchdog.py
+++ b/viseron/watchdog/thread_watchdog.py
@@ -3,6 +3,7 @@
import threading
from typing import Callable, Dict, List, Optional, overload
+from viseron.const import VISERON_SIGNAL_SHUTDOWN
from viseron.watchdog import WatchDog
LOGGER = logging.getLogger(__name__)
@@ -47,6 +48,7 @@ def __init__(
restart_method: Optional[Callable] = None,
base_class=None,
base_class_args=(),
+ stage: Optional[str] = VISERON_SIGNAL_SHUTDOWN,
) -> None:
...
@@ -68,6 +70,7 @@ def __init__(
restart_method: Optional[Callable] = None,
base_class=None,
base_class_args=(),
+ stage: Optional[str] = VISERON_SIGNAL_SHUTDOWN,
) -> None:
...
@@ -88,6 +91,7 @@ def __init__(
restart_method: Optional[Callable] = None,
base_class=None,
base_class_args=(),
+ stage: Optional[str] = VISERON_SIGNAL_SHUTDOWN,
) -> None:
# _started is set in Thread.__init__() but we set it here to make mypy happy
self._started = threading.Event()
@@ -117,6 +121,8 @@ def __init__(
self._restart_method = restart_method
self._base_class = base_class
self._base_class_args = base_class_args
+ self._stage = stage
+ setattr(self, "stage", stage)
@property
def started(self):
@@ -172,6 +178,7 @@ def clone(self):
restart_method=self._restart_method,
base_class=self._base_class,
base_class_args=self._base_class_args,
+ stage=self._stage,
)