From 85cc06d281f0c75b523f3635782488de19a55f4a Mon Sep 17 00:00:00 2001 From: Pablo Grill Date: Thu, 29 Jul 2021 17:12:03 -0300 Subject: [PATCH 1/7] Remove areas from processor. --- api/models/area.py | 18 - api/models/area_logger.py | 31 - api/models/config.py | 4 - api/models/export.py | 11 +- api/models/occupancy_rule.py | 107 - api/processor_api.py | 13 +- api/routers/area_loggers.py | 124 - api/routers/areas.py | 295 --- api/routers/cameras.py | 21 +- api/routers/config.py | 11 - api/routers/export.py | 69 +- api/routers/metrics/__init__.py | 1 - api/routers/metrics/area_metrics.py | 272 -- api/routers/metrics/metrics.py | 43 +- api/tests/app/test_area_all.py | 291 --- api/tests/app/test_area_metrics.py | 2249 ----------------- api/tests/app/test_area_occupancy_rules.py | 211 -- api/tests/data/config-x86-openvino_EMPTY.ini | 17 - .../data/config-x86-openvino_JUST_CAMERAS.ini | 17 - .../data/config-x86-openvino_METRICS.ini | 29 - .../data/processor/config/areas/ALL.json | 1 - api/tests/utils/example_models.py | 30 - api/tests/utils/fixtures_tests.py | 48 +- api/utils.py | 15 - config-coral.ini | 18 - config-jetson-nano.ini | 18 - config-jetson-tx2.ini | 18 - config-x86-gpu-tensorrt.ini | 18 - config-x86-gpu.ini | 19 - config-x86-openvino.ini | 18 - config-x86.ini | 18 - constants.py | 2 - libs/area_engine.py | 96 - libs/area_threading.py | 68 - libs/backups/s3_backup.py | 35 +- libs/config_engine.py | 38 +- libs/entities/area.py | 86 - libs/loggers/area_loggers/__init__.py | 0 .../area_loggers/file_system_logger.py | 36 - libs/loggers/area_loggers/logger.py | 16 - libs/metrics/__init__.py | 1 - libs/metrics/base.py | 33 +- libs/metrics/occupancy.py | 123 - libs/metrics/utils.py | 4 - libs/notifications/slack_notifications.py | 6 +- libs/processor_core.py | 23 - libs/reports/notifications.py | 24 +- libs/utils/config.py | 4 - libs/utils/loggers.py | 13 - libs/utils/mail_global_report.html | 9 - libs/utils/mailing.py | 9 +- libs/utils/notifications.py | 3 +- run_periodic_task.py | 11 +- 53 files changed, 64 insertions(+), 4631 deletions(-) delete mode 100644 api/models/area.py delete mode 100644 api/models/area_logger.py delete mode 100644 api/models/occupancy_rule.py delete mode 100644 api/routers/area_loggers.py delete mode 100644 api/routers/areas.py delete mode 100644 api/routers/metrics/area_metrics.py delete mode 100644 api/tests/app/test_area_all.py delete mode 100644 api/tests/app/test_area_metrics.py delete mode 100644 api/tests/app/test_area_occupancy_rules.py delete mode 100644 api/tests/data/mocked_data/data/processor/config/areas/ALL.json delete mode 100755 libs/area_engine.py delete mode 100644 libs/area_threading.py delete mode 100644 libs/entities/area.py delete mode 100644 libs/loggers/area_loggers/__init__.py delete mode 100644 libs/loggers/area_loggers/file_system_logger.py delete mode 100644 libs/loggers/area_loggers/logger.py delete mode 100644 libs/metrics/occupancy.py diff --git a/api/models/area.py b/api/models/area.py deleted file mode 100644 index 442c7875..00000000 --- a/api/models/area.py +++ /dev/null @@ -1,18 +0,0 @@ -from pydantic import Field -from typing import List, Optional - -from .base import EntityConfigDTO, NotificationConfig, SnakeModel -from .occupancy_rule import OccupancyRuleListDTO - - -class AreaNotificationConfig(NotificationConfig): - occupancyThreshold: Optional[int] = Field(0, example=300) - - -class AreaConfigDTO(EntityConfigDTO, AreaNotificationConfig): - cameras: Optional[str] = Field("", example='cam0,cam1') - occupancy_rules: 
Optional[OccupancyRuleListDTO] = Field([], example=[]) - - -class AreasListDTO(SnakeModel): - areas: List[AreaConfigDTO] diff --git a/api/models/area_logger.py b/api/models/area_logger.py deleted file mode 100644 index 20a45423..00000000 --- a/api/models/area_logger.py +++ /dev/null @@ -1,31 +0,0 @@ -from pydantic import Field, validator -from typing import List, Optional - -from .base import OptionalSectionConfig, SnakeModel - - -class AreaLoggerDTO(OptionalSectionConfig): - logDirectory: Optional[str] = Field(example="/repo/data/processor/static/data/areas") - - @validator("name") - def validate_name(cls, value): - if value != "file_system_logger": - raise ValueError(f"Not supported logger named: {value}") - return value - - -class FileSystemLoggerDTO(OptionalSectionConfig): - logDirectory: str = Field("/repo/data/processor/static/data/sources", - example="/repo/data/processor/static/data/areas") - - -class AreaLoggerListDTO(SnakeModel): - areasLoggers: List[AreaLoggerDTO] - - -def validate_logger(logger: AreaLoggerDTO): - logger_model = None - if logger.name == "file_system_logger": - logger_model = FileSystemLoggerDTO - # Validate that the specific logger's fields are correctly set - logger_model(**logger.dict()) diff --git a/api/models/config.py b/api/models/config.py index 734489d3..833c2b26 100644 --- a/api/models/config.py +++ b/api/models/config.py @@ -5,8 +5,6 @@ from .app import AppDTO from .api import ApiDTO -from .area import AreaConfigDTO -from .area_logger import AreaLoggerDTO from .base import SnakeModel from .camera import CameraDTO from .classifier import ClassifierDTO @@ -23,13 +21,11 @@ class ConfigDTO(SnakeModel): api: ApiDTO core: CoreDTO cameras: List[CameraDTO] - areas: Optional[List[AreaConfigDTO]] = [] detector: DetectorDTO classifier: Optional[ClassifierDTO] tracker: TrackerDTO sourcePostProcessors: List[SourcePostProcessorDTO] sourceLoggers: List[SourceLoggerDTO] - areaLoggers: Optional[List[AreaLoggerDTO]] = [] periodicTasks: Optional[List[PeriodicTaskDTO]] = [] diff --git a/api/models/export.py b/api/models/export.py index fc36d29f..bfa7d1e4 100644 --- a/api/models/export.py +++ b/api/models/export.py @@ -20,8 +20,6 @@ class ExportDataType(str, Enum): class ExportDTO(SnakeModel): - areas: Optional[List[str]] = Field([], example=["area1", "area2", "area3"]) - all_areas: Optional[bool] = Field(False, example=True) cameras: Optional[List[str]] = Field([], example=["camera1", "camera2"]) all_cameras: Optional[bool] = Field(False, example=True) from_date: Optional[date] = Field(None, example="2020-12-01") @@ -44,9 +42,8 @@ def validate_dates(cls, values): @root_validator def validate_entities(cls, values): - if not any([values.get("areas"), values.get("all_areas"), values.get("cameras"), - values.get("all_cameras")]): - logger.info("No cameras or areas were provided.") - raise ValueError("No cameras or areas were provided. You need to provide unless one camera or " - "area to call the export endpoint.") + if not any([values.get("cameras"), values.get("all_cameras")]): + logger.info("No cameras were provided.") + raise ValueError("No cameras were provided. 
You need to provide at least one camera "
+                         "to call the export endpoint.")
         return values
diff --git a/api/models/occupancy_rule.py b/api/models/occupancy_rule.py
deleted file mode 100644
index 7d9b8b29..00000000
--- a/api/models/occupancy_rule.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from pydantic import Field, validator, root_validator
-from typing import List, Optional
-from datetime import time
-
-from libs.entities.occupancy_rule import date_before, occ_str_to_time
-from .base import SnakeModel
-
-import logging
-logger = logging.getLogger(__name__)
-
-
-class AreaOccupancyRule(SnakeModel):
-    days: List[bool]
-    start_hour: str = Field(example="08:00")
-    start_time: Optional[time] = None
-    finish_hour: str = Field(example="12:00")
-    finish_time: Optional[time] = None
-    max_occupancy: int = Field(example=100)
-
-    @classmethod
-    def _valid_time(cls, value: str):
-        try:
-            t = occ_str_to_time(value)
-            if not t:
-                return False
-            return t
-        except:  # noqa
-            return False
-
-    @root_validator(pre=True)
-    def check_hours(cls, values):
-        assert "start_hour" in values, "start_hour is a required field"
-        assert "finish_hour" in values, "finish_hour is a required field"
-        return values
-
-    @validator('days')
-    def days_must_be_seven(cls, days):
-        if len(days) != 7:
-            raise ValueError("'days' must contain 7 bool values")
-        return days
-
-    @validator('start_time', always=True, pre=False)
-    def start_hour_must_be_valid(cls, v, values):
-        t = cls._valid_time(values['start_hour'])
-        if not t:
-            raise ValueError("'start_hour' is not in valid format")
-        return t
-
-    @validator('finish_time', always=True, pre=False)
-    def finish_hour_must_be_valid(cls, v, values):
-        t = cls._valid_time(values["finish_hour"])
-        if not t:
-            raise ValueError("'finish_hour' is not in valid format")
-        if "start_hour" in values and not date_before(occ_str_to_time(values["start_hour"]), t):
-            raise ValueError("'finish_hour' must be later than 'start_hour'")
-        return t
-
-    @validator('max_occupancy')
-    def max_occupancy_must_be_positive(cls, max_occupancy):
-        if max_occupancy < 0:
-            raise ValueError("'max_occupancy' must be > 0")
-        return max_occupancy
-
-    def to_store_json(self):
-        return {
-            "days": list(map(int, self.days)),
-            "start_hour": self.start_hour,
-            "finish_hour": self.finish_hour,
-            "max_occupancy": self.max_occupancy
-        }
-
-
-class OccupancyRuleListDTO(SnakeModel):
-    __root__: List[AreaOccupancyRule]
-
-    def __iter__(self):
-        return iter(self.__root__)
-
-    def __getitem__(self, item):
-        return self.__root__[item]
-
-    @validator('__root__')
-    def validate_no_overlaps(cls, the_list):
-        for l1 in the_list:
-            for l2 in the_list:
-                if l1 != l2 and do_overlap(l1, l2):
-                    raise ValueError("Occupancy rules must not overlap!")
-        return the_list
-
-    def to_store_json(self):
-        return {"occupancy_rules": [r.to_store_json() for r in self]}
-
-    @classmethod
-    def from_store_json(cls, json_value):
-        if "occupancy_rules" not in json_value:
-            return []
-        objs = [AreaOccupancyRule.parse_obj(v) for v in json_value["occupancy_rules"]]
-        return objs
-
-
-def do_overlap(a: AreaOccupancyRule, b: AreaOccupancyRule):
-    for d in range(7):
-        if a.days[d] and b.days[d]:
-            if (date_before(b.start_time, a.finish_time, strict=True) and
-                    date_before(a.start_time, b.finish_time, strict=True)):
-                return True
-    return False
diff --git a/api/processor_api.py b/api/processor_api.py
index 075f23be..f3845cab 100644
--- a/api/processor_api.py
+++ b/api/processor_api.py
@@ -9,16 +9,12 @@
 from fastapi.openapi.utils import get_openapi
 from share.commands import 
Commands -from libs.utils.loggers import get_area_log_directory, get_source_log_directory, get_screenshots_directory, \ - get_config_source_directory, get_config_areas_directory -from api.utils import bad_request_serializer +from libs.utils.loggers import get_source_log_directory, get_screenshots_directory, get_config_source_directory from .dependencies import validate_token from .queue_manager import QueueManager from .routers.app import app_router, dashboard_sync_router from .routers.api import api_router -from .routers.areas import areas_router -from .routers.area_loggers import area_loggers_router from .routers.auth import auth_router from .routers.core import core_router from .routers.cameras import cameras_router @@ -26,7 +22,7 @@ from .routers.config import config_router from .routers.detector import detector_router from .routers.export import export_router -from .routers.metrics import area_metrics_router, camera_metrics_router +from .routers.metrics import camera_metrics_router from .routers.periodic_tasks import periodic_tasks_router from .routers.slack import slack_router from .routers.source_loggers import source_loggers_router @@ -60,8 +56,6 @@ def __init__(self): def create_fastapi_app(self): os.environ["SourceLogDirectory"] = get_source_log_directory(self.settings.config) os.environ["SourceConfigDirectory"] = get_config_source_directory(self.settings.config) - os.environ["AreaLogDirectory"] = get_area_log_directory(self.settings.config) - os.environ["AreaConfigDirectory"] = get_config_areas_directory(self.settings.config) os.environ["ScreenshotsDirectory"] = get_screenshots_directory(self.settings.config) os.environ["HeatmapResolution"] = self.settings.config.get_section_dict("App")["HeatmapResolution"] @@ -75,7 +69,6 @@ def create_fastapi_app(self): app.include_router(config_router, prefix="/config", tags=["Config"], dependencies=dependencies) app.include_router(cameras_router, prefix="/cameras", tags=["Cameras"], dependencies=dependencies) - app.include_router(areas_router, prefix="/areas", tags=["Areas"], dependencies=dependencies) app.include_router(app_router, prefix="/app", tags=["App"], dependencies=dependencies) app.include_router(dashboard_sync_router, prefix="/app", tags=["App"]) app.include_router(api_router, prefix="/api", tags=["Api"], dependencies=dependencies) @@ -86,9 +79,7 @@ def create_fastapi_app(self): app.include_router(source_post_processors_router, prefix="/source_post_processors", tags=["Source Post Processors"], dependencies=dependencies) app.include_router(source_loggers_router, prefix="/source_loggers", tags=["Source Loggers"], dependencies=dependencies) - app.include_router(area_loggers_router, prefix="/area_loggers", tags=["Area Loggers"], dependencies=dependencies) app.include_router(periodic_tasks_router, prefix="/periodic_tasks", tags=["Periodic Tasks"], dependencies=dependencies) - app.include_router(area_metrics_router, prefix="/metrics/areas", tags=["Metrics"], dependencies=dependencies) app.include_router(camera_metrics_router, prefix="/metrics/cameras", tags=["Metrics"], dependencies=dependencies) app.include_router(export_router, prefix="/export", tags=["Export"], dependencies=dependencies) app.include_router(slack_router, prefix="/slack", tags=["Slack"], dependencies=dependencies) diff --git a/api/routers/area_loggers.py b/api/routers/area_loggers.py deleted file mode 100644 index 70abf14c..00000000 --- a/api/routers/area_loggers.py +++ /dev/null @@ -1,124 +0,0 @@ -from fastapi import APIRouter, status -from pydantic import 
ValidationError -from starlette.exceptions import HTTPException -from typing import Optional - -from api.models.area_logger import AreaLoggerDTO, AreaLoggerListDTO, validate_logger -from api.utils import ( - extract_config, handle_response, update_config, - map_section_from_config, map_to_config_file_format, bad_request_serializer -) - -area_loggers_router = APIRouter() - - -def get_area_loggers(): - config = extract_config(config_type="area_loggers") - return [map_section_from_config(x, config) for x in config.keys()] - - -@area_loggers_router.get("", response_model=AreaLoggerListDTO, - response_model_exclude_none=True) -def list_area_loggers(): - """ - Returns the list of area logger configured in the processor. - """ - return { - "areasLoggers": get_area_loggers() - } - - -@area_loggers_router.get("/{logger_name}", response_model=AreaLoggerDTO, - response_model_exclude_none=True) -def get_area_loggerss(logger_name: str): - """ - Returns the configuration related to the area logger . - """ - logger = next((ps for ps in get_area_loggers() if ps["name"] == logger_name), None) - if not logger: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, - detail=f"The logger: {logger_name} does not exist") - return logger - - -@area_loggers_router.post("", response_model=AreaLoggerDTO, - status_code=status.HTTP_201_CREATED, response_model_exclude_none=True) -async def create_logger(new_logger: AreaLoggerDTO, reboot_processor: Optional[bool] = True): - """ - Adds an area logger. - """ - config_dict = extract_config() - loggers_index = [int(x[-1]) for x in config_dict.keys() if x.startswith("AreaLogger_")] - loggers = get_area_loggers() - try: - validate_logger(new_logger) - except ValidationError as e: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=bad_request_serializer(str(e)) - ) - if new_logger.name in [ps["name"] for ps in loggers]: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=bad_request_serializer("Logger already exists", error_type="config duplicated logger") - ) - logger_file = map_to_config_file_format(new_logger, True) - index = 0 - if loggers_index: - index = max(loggers_index) + 1 - config_dict[f"AreaLogger_{index}"] = logger_file - success = update_config(config_dict, reboot_processor) - if not success: - return handle_response(logger_file, success, status.HTTP_201_CREATED) - return next((ps for ps in get_area_loggers() if ps["name"] == logger_file["Name"]), None) - - -@area_loggers_router.put("/{logger_name}", response_model=AreaLoggerDTO) -async def edit_logger(logger_name: str, edited_logger: AreaLoggerDTO, - reboot_processor: Optional[bool] = True): - """ - Edits the configuration related to the area logger - """ - edited_logger.name = logger_name - config_dict = extract_config() - edited_logger_section = next(( - key for key, value in config_dict.items() - if key.startswith("AreaLogger_") and value["Name"] == logger_name - ), None) - if not edited_logger_section: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"The logger: {logger_name} does not exist") - try: - validate_logger(edited_logger) - except ValidationError as e: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=bad_request_serializer(str(e)) - ) - logger_file = map_to_config_file_format(edited_logger, True) - config_dict[edited_logger_section] = logger_file - success = update_config(config_dict, reboot_processor) - if not success: - return handle_response(logger_file, success) - return next((ps for ps in 
get_area_loggers() if ps["name"] == logger_name), None) - - -@area_loggers_router.delete("/{logger_name}", status_code=status.HTTP_204_NO_CONTENT) -async def delete_logger(logger_name: str, reboot_processor: Optional[bool] = True): - """ - Deletes the configuration related to the area logger - """ - config_dict = extract_config() - logger_section = next(( - key for key, value in config_dict.items() - if key.startswith("AreaLogger_") and value["Name"] == logger_name - ), None) - if not logger_section: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"The logger: {logger_name} does not exist") - - config_dict.pop(logger_section) - success = update_config(config_dict, reboot_processor) - return handle_response(None, success, status.HTTP_204_NO_CONTENT) diff --git a/api/routers/areas.py b/api/routers/areas.py deleted file mode 100644 index e773bdcc..00000000 --- a/api/routers/areas.py +++ /dev/null @@ -1,295 +0,0 @@ -import logging -import os -import re -import shutil -import json -from pathlib import Path - -from fastapi import APIRouter, status -from starlette.exceptions import HTTPException -from typing import Optional - -from api.models.area import AreaConfigDTO, AreasListDTO -from constants import ALL_AREAS -from .cameras import map_camera, get_cameras -from api.models.occupancy_rule import OccupancyRuleListDTO -from libs.utils import config as config_utils -from api.utils import ( - extract_config, get_config, handle_response, reestructure_areas, update_config, map_section_from_config, - map_to_config_file_format, bad_request_serializer -) - -areas_router = APIRouter() - - -def get_areas(): - config = extract_config(config_type="areas") - return [map_section_from_config(x, config) for x in config.keys()] - - -@areas_router.get("", response_model=AreasListDTO) -async def list_areas(): - """ - Returns the list of areas managed by the processor. - """ - return { - "areas": get_areas() - } - - -def area_all_data(): - config = get_config() - area_all = config.get_area_all() - - if area_all is None: - raise HTTPException(status_code=status.HTTP_501_NOT_IMPLEMENTED, detail=f"The area: 'ALL' does not exist") - - return { - "violation_threshold": area_all.violation_threshold, - "notify_every_minutes": area_all.notify_every_minutes, - "emails": ",".join(area_all.emails), - "enable_slack_notifications": area_all.enable_slack_notifications, - "daily_report": area_all.daily_report, - "daily_report_time": area_all.daily_report_time, - "occupancy_threshold": area_all.occupancy_threshold, - "id": area_all.id, - "name": area_all.name, - "cameras": ",".join(area_all.cameras) - } - - -@areas_router.get("/{area_id}", response_model=AreaConfigDTO) -async def get_area(area_id: str): - """ - Returns the configuration related to the area - """ - if area_id.upper() == ALL_AREAS: - area = area_all_data() - else: - area = next((area for area in get_areas() if area["id"] == area_id), None) - if not area: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The area: {area_id} does not exist") - area["occupancy_rules"] = get_area_occupancy_rules(area["id"]) - return area - - -@areas_router.post("", response_model=AreaConfigDTO, status_code=status.HTTP_201_CREATED) -async def create_area(new_area: AreaConfigDTO, reboot_processor: Optional[bool] = True): - """ - Adds a new area to the processor. - """ - # TODO: We have to autogenerate the ID. 
-    config = get_config()
-    areas = config.get_areas()
-    if new_area.id in [area.id for area in areas]:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail=bad_request_serializer("Area already exists", error_type="config duplicated area")
-        )
-    elif new_area.id.upper() == ALL_AREAS:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail=bad_request_serializer("Area with ID: 'ALL' is not valid.", error_type="Invalid ID")
-        )
-
-    cameras = config.get_video_sources()
-    camera_ids = [camera.id for camera in cameras]
-    if not all(x in camera_ids for x in new_area.cameras.split(",")):
-        non_existent_cameras = set(new_area.cameras.split(",")) - set(camera_ids)
-        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The cameras: {non_existent_cameras} do not exist")
-    occupancy_rules = new_area.occupancy_rules
-    del new_area.occupancy_rules
-    area_dict = map_to_config_file_format(new_area)
-
-    config_dict = extract_config()
-    config_dict[f"Area_{len(areas)-1}"] = area_dict
-    success = update_config(config_dict, reboot_processor)
-
-    if occupancy_rules:
-        set_occupancy_rules(new_area.id, occupancy_rules)
-
-    if not success:
-        return handle_response(area_dict, success, status.HTTP_201_CREATED)
-
-    area_directory = os.path.join(os.getenv("AreaLogDirectory"), new_area.id, "occupancy_log")
-    Path(area_directory).mkdir(parents=True, exist_ok=True)
-    area_config_directory = os.path.join(os.getenv("AreaConfigDirectory"), new_area.id)
-    Path(area_config_directory).mkdir(parents=True, exist_ok=True)
-
-    # known issue: Occupancy rules not returned
-    return next((area for area in get_areas() if area["id"] == area_dict["Id"]), None)
-
-
-def modify_area_all(area_information):
-    """
-    Edits the configuration related to the area "ALL", an area that contains all cameras.
-    """
-    config = get_config()
-    config_path = config.get_area_config_path(ALL_AREAS)
-
-    json_content = {
-        "global_area_all": {
-            "ViolationThreshold": area_information.violationThreshold,
-            "NotifyEveryMinutes": area_information.notifyEveryMinutes,
-            "Emails": area_information.emails,
-            "EnableSlackNotifications": area_information.enableSlackNotifications,
-            "DailyReport": area_information.dailyReport,
-            "DailyReportTime": area_information.dailyReportTime,
-            "OccupancyThreshold": area_information.occupancyThreshold,
-            "Id": ALL_AREAS,
-            "Name": ALL_AREAS,
-        }
-    }
-
-    if not os.path.exists(config_path):
-        # Create the file with if necessary
-        with open(config_path, 'x') as outfile:
-            json.dump({"global_area_all": {}}, outfile)
-
-    with open(config_path, "r") as file:
-        file_content = json.load(file)
-
-    file_content["global_area_all"] = json_content["global_area_all"]
-
-    with open(config_path, "w") as file:
-        json.dump(file_content, file)
-
-    area_all = config.get_area_all()
-    json_content["global_area_all"]["Cameras"] = ",".join(area_all.cameras)
-
-    return {re.sub(r'(?<!^)(?=[A-Z])', '_', key).lower(): value
-            for key, value in json_content["global_area_all"].items()}
-
-
-@areas_router.put("/{area_id}", response_model=AreaConfigDTO)
-async def edit_area(area_id: str, edited_area: AreaConfigDTO, reboot_processor: Optional[bool] = True):
-    """
-    Edits the configuration related to the area <area_id>
- """ - if area_id.upper() == ALL_AREAS: - area = modify_area_all(edited_area) - if edited_area.occupancy_rules: - set_occupancy_rules(ALL_AREAS, edited_area.occupancy_rules) - else: - delete_area_occupancy_rules(ALL_AREAS) - area["occupancy_rules"] = get_area_occupancy_rules(ALL_AREAS) - return area - - edited_area.id = area_id - config_dict = extract_config() - area_names = [x for x in config_dict.keys() if x.startswith("Area_")] - areas = [map_section_from_config(x, config_dict) for x in area_names] - areas_ids = [area["id"] for area in areas] - try: - index = areas_ids.index(area_id) - except ValueError: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The area: {area_id} does not exist") - - cameras = [x for x in config_dict.keys() if x.startswith("Source_")] - cameras = [map_camera(x, config_dict, []) for x in cameras] - camera_ids = [camera["id"] for camera in cameras] - if not all(x in camera_ids for x in edited_area.cameras.split(",")): - non_existent_cameras = set(edited_area.cameras.split(",")) - set(camera_ids) - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The cameras: {non_existent_cameras}" - f"do not exist") - - occupancy_rules = edited_area.occupancy_rules - del edited_area.occupancy_rules - - area_dict = map_to_config_file_format(edited_area) - config_dict[f"Area_{index}"] = area_dict - success = update_config(config_dict, reboot_processor) - - if occupancy_rules: - set_occupancy_rules(edited_area.id, occupancy_rules) - else: - delete_area_occupancy_rules(area_id) - - if not success: - return handle_response(area_dict, success) - area = next((area for area in get_areas() if area["id"] == area_id), None) - area["occupancy_rules"] = get_area_occupancy_rules(area["id"]) - return area - - -@areas_router.delete("/{area_id}", status_code=status.HTTP_204_NO_CONTENT) -async def delete_area(area_id: str, reboot_processor: Optional[bool] = True): - """ - Deletes the configuration related to the area - """ - if area_id.upper() == ALL_AREAS: - delete_area_occupancy_rules(ALL_AREAS) - raise HTTPException( - status_code=status.HTTP_202_ACCEPTED, - detail="Area with ID: 'ALL' cannot be deleted. However, its occupancy rules were deleted." - ) - config_dict = extract_config() - areas_name = [x for x in config_dict.keys() if x.startswith("Area_")] - areas = [map_section_from_config(x, config_dict) for x in areas_name] - areas_ids = [area["id"] for area in areas] - try: - index = areas_ids.index(area_id) - except ValueError: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The area: {area_id} does not exist") - - config_dict.pop(f"Area_{index}") - config_dict = reestructure_areas(config_dict) - - success = update_config(config_dict, reboot_processor) - - delete_area_occupancy_rules(area_id) - - area_directory = os.path.join(os.getenv("AreaLogDirectory"), area_id) - shutil.rmtree(area_directory) - area_config_directory = os.path.join(os.getenv("AreaConfigDirectory"), area_id) - shutil.rmtree(area_config_directory) - - return handle_response(None, success, status.HTTP_204_NO_CONTENT) - - -def get_area_occupancy_rules(area_id: str): - """ - Returns time-based occupancy rules for an area. 
- """ - config = get_config() - areas = config.get_areas() - area = next((area for area in areas if area.id == area_id), None) - if not area: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The area: {area_id} does not exist") - area_config_path = area.get_config_path() - - if not os.path.exists(area_config_path): - return [] - - with open(area_config_path, "r") as area_file: - rules_data = json.load(area_file) - return OccupancyRuleListDTO.from_store_json(rules_data) - - -def set_occupancy_rules(area_id: str, rules): - area_config_path = get_config().get_area_config_path(area_id) - Path(os.path.dirname(area_config_path)).mkdir(parents=True, exist_ok=True) - - if os.path.exists(area_config_path): - with open(area_config_path, "r") as area_file: - data = json.load(area_file) - else: - data = {} - - with open(area_config_path, "w") as area_file: - data["occupancy_rules"] = rules.to_store_json()["occupancy_rules"] - json.dump(data, area_file) - - -def delete_area_occupancy_rules(area_id: str): - area_config_path = get_config().get_area_config_path(area_id) - - if os.path.exists(area_config_path): - with open(area_config_path, "r") as area_file: - data = json.load(area_file) - else: - return handle_response(None, False) - - with open(area_config_path, "w") as area_file: - if data.get("occupancy_rules") is not None: - del data["occupancy_rules"] - json.dump(data, area_file) diff --git a/api/routers/cameras.py b/api/routers/cameras.py index 4ba2018f..98166ca8 100644 --- a/api/routers/cameras.py +++ b/api/routers/cameras.py @@ -17,7 +17,7 @@ from api.settings import Settings from api.utils import ( - extract_config, get_config, handle_response, reestructure_areas, restart_processor, + extract_config, get_config, handle_response, restart_processor, update_config, map_section_from_config, map_to_config_file_format, bad_request_serializer ) from api.models.camera import (CameraDTO, CamerasListDTO, CreateCameraDTO, ImageModel, VideoLiveFeedModel, @@ -87,24 +87,6 @@ def get_camera_default_image_string(camera_id): with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()) - -def delete_camera_from_areas(camera_id, config_dict): - areas = {key: config_dict[key] for key in config_dict.keys() if key.startswith("Area_")} - for key, area in areas.items(): - cameras = area["Cameras"].split(",") - if camera_id in cameras: - cameras.remove(camera_id) - if len(cameras) == 0: - logger.warning(f'After removing the camera "{camera_id}", the area "{area["Id"]} - {area["Name"]}" \ - "was left with no cameras and deleted') - config_dict.pop(key) - else: - config_dict[key]["Cameras"] = ",".join(cameras) - - config_dict = reestructure_areas(config_dict) - return config_dict - - def reestructure_cameras(config_dict): """Ensure that all [Source_0, Source_1, ...] 
are consecutive""" source_names = [x for x in config_dict.keys() if x.startswith("Source_")] @@ -239,7 +221,6 @@ async def delete_camera(camera_id: str, reboot_processor: Optional[bool] = True) """ config_dict = extract_config() index = get_camera_index(config_dict, camera_id) - config_dict = delete_camera_from_areas(camera_id, config_dict) config_dict.pop(f"Source_{index}") config_dict = reestructure_cameras((config_dict)) success = update_config(config_dict, reboot_processor) diff --git a/api/routers/config.py b/api/routers/config.py index 514248a2..8ff266fc 100644 --- a/api/routers/config.py +++ b/api/routers/config.py @@ -20,11 +20,6 @@ def map_to_file_format(config_dto: ConfigDTO): config_dict = dict() config_dict["App"] = map_to_config_file_format(config_dto.app) config_dict["CORE"] = map_to_config_file_format(config_dto.core) - for count, area in enumerate(config_dto.areas): - a_cfg = map_to_config_file_format(area) - if "Occupancy_rules" in a_cfg: - del a_cfg["Occupancy_rules"] - config_dict["Area_" + str(count)] = a_cfg for count, camera in enumerate(config_dto.cameras): config_dict["Source_" + str(count)] = map_to_camera_file_format(camera) config_dict["Detector"] = map_to_config_file_format(config_dto.detector) @@ -36,8 +31,6 @@ def map_to_file_format(config_dto: ConfigDTO): source_post_processor, True) for count, source_logger in enumerate(config_dto.sourceLoggers): config_dict["SourceLogger_" + str(count)] = map_to_config_file_format(source_logger, True) - for count, area_logger in enumerate(config_dto.areaLoggers): - config_dict["AreaLogger_" + str(count)] = map_to_config_file_format(area_logger, True) for count, periodic_task in enumerate(config_dto.periodicTasks): config_dict["PeriodicTask_" + str(count)] = map_to_config_file_format(periodic_task, True) return config_dict @@ -45,23 +38,19 @@ def map_to_file_format(config_dto: ConfigDTO): def map_config(config, options): cameras_name = [x for x in config.keys() if x.startswith("Source_")] - areas_name = [x for x in config.keys() if x.startswith("Area_")] source_post_processor = [x for x in config.keys() if x.startswith("SourcePostProcessor_")] source_loggers = [x for x in config.keys() if x.startswith("SourceLogger_")] - area_loggers = [x for x in config.keys() if x.startswith("AreaLogger_")] periodic_tasks = [x for x in config.keys() if x.startswith("PeriodicTask_")] return { "app": map_section_from_config("App", config), "api": map_section_from_config("API", config), "core": map_section_from_config("CORE", config), "cameras": [map_camera(x, config, options) for x in cameras_name], - "areas": [map_section_from_config(x, config) for x in areas_name], "detector": map_section_from_config("Detector", config), "classifier": map_section_from_config("Classifier", config), "tracker": map_section_from_config("Tracker", config), "sourcePostProcessors": [map_section_from_config(x, config) for x in source_post_processor], "sourceLoggers": [map_section_from_config(x, config) for x in source_loggers], - "areaLoggers": [map_section_from_config(x, config) for x in area_loggers], "periodicTasks": [map_section_from_config(x, config) for x in periodic_tasks], } diff --git a/api/routers/export.py b/api/routers/export.py index e4ae7b80..f0deed65 100644 --- a/api/routers/export.py +++ b/api/routers/export.py @@ -12,7 +12,7 @@ from api.models.export import ExportDTO, ExportDataType from api.utils import extract_config, clean_up_file -from libs.metrics import FaceMaskUsageMetric, OccupancyMetric, SocialDistancingMetric, InOutMetric, 
DwellTimeMetric
+from libs.metrics import FaceMaskUsageMetric, SocialDistancingMetric, InOutMetric, DwellTimeMetric
 
 logger = logging.getLogger(__name__)
@@ -49,32 +49,9 @@ def export_folder_into_zip(source_path, destination_path, zip_file, from_date, t
         zip_file.write(os.path.join(source_path, filename),
                        arcname=os.path.join(destination_path, filename))
 
 
-def get_areas_to_export(export_info: ExportDTO) -> List[Tuple[str, str]]:
+def get_cameras_to_export(export_info: ExportDTO) -> List[Tuple[str, str]]:
     """
-    Returns the list of areas (area_id, area_name) requested in the <export_info>.
-    """
-    all_areas = extract_config("areas").values()
-    selected_areas = []
-    if export_info.all_areas:
-        selected_areas = all_areas
-    else:
-        selected_areas = [a for a in all_areas if a["Id"] in export_info.areas]
-        if len(selected_areas) != len(export_info.areas):
-            # Some of the selected areas don't exist
-            missing_areas = set(export_info.areas) - set([a["Id"]for a in selected_areas])
-            raise HTTPException(
-                status_code=status.HTTP_404_NOT_FOUND,
-                detail=f"Areas with ids {missing_areas} don't exist."
-            )
-    if selected_areas:
-        return [(area["Id"], area["Name"]) for area in selected_areas]
-    return []
-
-
-def get_cameras_to_export(export_info: ExportDTO, areas: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-    """
-    Returns the list of cameras (camera_id, camera_name) requested in the <export_info> and the cameras
-    included in the <areas>.
+    Returns the list of cameras (camera_id, camera_name) requested in the <export_info>.
     """
     all_cameras = extract_config("cameras").values()
     selected_cameras = []
@@ -89,14 +66,6 @@ def get_cameras_to_export(export_info: ExportDTO, areas: List[Tuple[str, str]])
             status_code=status.HTTP_404_NOT_FOUND,
             detail=f"Cameras with ids {missing_cameras} don't exist."
         )
-    # Include areas' cameras
-    areas_cameras = []
-    areas_ids = [a[0] for a in areas]
-    for area in [a for a in extract_config("areas").values() if a['Id'] in areas_ids]:
-        areas_cameras.extend(area["Cameras"].split(","))
-    selected_cameras.extend(
-        [c for c in all_cameras if c["Id"] in areas_cameras and c["Id"] not in export_info.cameras]
-    )
     if selected_cameras:
         return [(camera["Id"], camera["Name"]) for camera in selected_cameras]
     return []
@@ -161,51 +130,23 @@ def export_camera_data_into_file(export_info: ExportDTO, camera_id: str, camera_
     )
 
 
-def export_area_data_into_file(export_info: ExportDTO, area_id: str, area_name: str, zip_file: str) -> None:
-    """
-    Includes into the <zip_file> all the information requested in the <export_info> for the area <area_id>.
-    """
-    if ALL_DATA in export_info.data_types or RAW_DATA in export_info.data_types:
-        occupancy_logs_path = os.path.join(os.getenv("AreaLogDirectory"), area_id, "occupancy_log")
-        export_folder_into_zip(
-            occupancy_logs_path,
-            os.path.join("areas", f"{area_id}-{area_name}", "raw_data"),
-            zip_file,
-            export_info.from_date,
-            export_info.to_date
-        )
-    if ALL_DATA in export_info.data_types or OCCUPANCY in export_info.data_types:
-        occupancy_report_folder = f"reports/{OccupancyMetric.reports_folder}"
-        occupancy_report_path = os.path.join(os.getenv("AreaLogDirectory"), area_id, occupancy_report_folder)
-        export_folder_into_zip(
-            occupancy_report_path,
-            os.path.join("areas", f"{area_id}-{area_name}", occupancy_report_folder),
-            zip_file,
-            export_info.from_date,
-            export_info.to_date
-        )
-
-
 @export_router.put("")
 async def export(export_info: ExportDTO, background_tasks: BackgroundTasks):
     """
     Returns a zip file containing the CSV files for the requested data.
 
     The endpoint allows filtering by:
-    - *Entity*: (areas or cameras). 
+ - *Entity*: (cameras). - *Dates*: (only include data for the specified date range). - *Data Type*: (the type of information that you want to export. The available values are raw_data, occupancy, social-distancing, facemask-usage, in-out and all_data) """ - areas = get_areas_to_export(export_info) - cameras = get_cameras_to_export(export_info, areas) + cameras = get_cameras_to_export(export_info) temp_dir = tempfile.mkdtemp() export_filename = f"export-{date.today()}.zip" zip_path = os.path.join(temp_dir, export_filename) with ZipFile(zip_path, 'w', compression=ZIP_DEFLATED) as export_zip: for (cam_id, name) in cameras: export_camera_data_into_file(export_info, cam_id, name, export_zip) - for (area_id, name) in areas: - export_area_data_into_file(export_info, area_id, name, export_zip) background_tasks.add_task(clean_up_file, temp_dir) return FileResponse(zip_path, filename=export_filename) diff --git a/api/routers/metrics/__init__.py b/api/routers/metrics/__init__.py index bd741b6c..332e0daa 100644 --- a/api/routers/metrics/__init__.py +++ b/api/routers/metrics/__init__.py @@ -1,2 +1 @@ -from .area_metrics import metrics_router as area_metrics_router # noqa from .camera_metrics import metrics_router as camera_metrics_router # noqa diff --git a/api/routers/metrics/area_metrics.py b/api/routers/metrics/area_metrics.py deleted file mode 100644 index 6a0300ad..00000000 --- a/api/routers/metrics/area_metrics.py +++ /dev/null @@ -1,272 +0,0 @@ -from datetime import date, timedelta -from fastapi import APIRouter, Query - -from api.models.metrics import ( - FaceMaskDaily, FaceMaskHourly, FaceMaskWeekly, FaceMaskLive, SocialDistancingDaily, SocialDistancingHourly, - SocialDistancingWeekly, SocialDistancingLive, OccupancyHourly, OccupancyDaily, OccupancyLive, OccupancyWeekly, - InOutLive, InOutHourly, InOutDaily, InOutWeekly, - DwellTimeDaily, DwellTimeHourly, DwellTimeLive, DwellTimeWeekly) -from constants import AREAS, FACEMASK_USAGE, OCCUPANCY, SOCIAL_DISTANCING, IN_OUT, DWELL_TIME - -from .metrics import get_live_metric, get_hourly_metric, get_daily_metric, get_weekly_metric - -metrics_router = APIRouter() - - -# Occupancy MetricsAREAS -@metrics_router.get("/occupancy/live", response_model=OccupancyLive) -def get_area_occupancy_live(areas: str = ""): - """ - Returns a report with live information about the occupancy in the areas . - """ - return get_live_metric(AREAS, areas, OCCUPANCY) - - -@metrics_router.get("/occupancy/hourly", response_model=OccupancyHourly) -def get_area_occupancy_hourly_report(areas: str = "", date: date = Query(date.today())): - """ - Returns a hourly report (for the date specified) with information about the occupancy in - the areas . - """ - return get_hourly_metric(AREAS, areas, OCCUPANCY, date) - - -@metrics_router.get("/occupancy/daily", response_model=OccupancyDaily) -def get_area_occupancy_daily_report(areas: str = "", - from_date: date = Query((date.today() - timedelta(days=3))), - to_date: date = Query(date.today())): - """ - Returns a daily report (for the date range specified) with information about the occupancy in - the areas . 
- """ - return get_daily_metric(AREAS, areas, OCCUPANCY, from_date, to_date) - - -@metrics_router.get("/occupancy/weekly", response_model=OccupancyWeekly) -def get_area_occupancy_weekly_report( - areas: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today())): - """ - Returns a weekly report (for the date range specified) with information about the occupancy in - the areas . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. - - Report spans from `weeks*7 + 1` days ago to yesterday. - - Taking yesterday as the end of week. - - **Else:** - - Report spans from `from_Date` to `to_date`. - - Taking Sunday as the end of week - """ - return get_weekly_metric(AREAS, areas, OCCUPANCY, from_date, to_date, weeks) - - -# Social Distancing Metrics -@metrics_router.get("/social-distancing/live", response_model=SocialDistancingLive) -def get_area_social_distancing_live(areas: str = ""): - """ - Returns a report with live information about the social distancing infractions - detected in the areas . - """ - return get_live_metric(AREAS, areas, SOCIAL_DISTANCING) - - -@metrics_router.get("/social-distancing/hourly", response_model=SocialDistancingHourly) -def get_area_distancing_hourly_report(areas: str = "", date: date = Query(date.today())): - """ - Returns a hourly report (for the date specified) with information about the social distancing infractions - detected in the areas . - """ - return get_hourly_metric(AREAS, areas, SOCIAL_DISTANCING, date) - - -@metrics_router.get("/social-distancing/daily", response_model=SocialDistancingDaily) -def get_area_distancing_daily_report(areas: str = "", - from_date: date = Query((date.today() - timedelta(days=3))), - to_date: date = Query(date.today())): - """ - Returns a daily report (for the date range specified) with information about the social distancing infractions - detected in the areas . - """ - return get_daily_metric(AREAS, areas, SOCIAL_DISTANCING, from_date, to_date) - - -@metrics_router.get("/social-distancing/weekly", response_model=SocialDistancingWeekly) -def get_area_distancing_weekly_report( - areas: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today())): - """ - Returns a weekly report (for the date range specified) with information about the social distancing - infractions detected in the areas . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. - - Report spans from `weeks*7 + 1` days ago to yesterday. - - Taking yesterday as the end of week. - - **Else:** - - Report spans from `from_Date` to `to_date`. - - Taking Sunday as the end of week - """ - return get_weekly_metric(AREAS, areas, SOCIAL_DISTANCING, from_date, to_date, weeks) - - -# Dwell time Metrics -@metrics_router.get("/dwell-time/live", response_model=DwellTimeLive) -def get_area_dwell_time_live(areas: str = ""): - """ - Returns a report with live information about the dwell time of people - detected in the areas . - """ - return get_live_metric(AREAS, areas, DWELL_TIME) - - -@metrics_router.get("/dwell-time/hourly", response_model=DwellTimeHourly) -def get_area_dwell_time_hourly_report(areas: str = "", date: date = Query(date.today())): - """ - Returns a hourly report (for the date specified) with information about the duel time (⚔) of people - detected in the areas . 
- """ - return get_hourly_metric(AREAS, areas, DWELL_TIME, date) - - -@metrics_router.get("/dwell-time/daily", response_model=DwellTimeDaily) -def get_area_dwell_time_daily_report(areas: str = "", - from_date: date = Query((date.today() - timedelta(days=3))), - to_date: date = Query(date.today())): - """ - Returns a daily report (for the date range specified) with information about the dwell time of people - detected in the areas . - """ - return get_daily_metric(AREAS, areas, DWELL_TIME, from_date, to_date) - - -@metrics_router.get("/dwell-time/weekly", response_model=DwellTimeWeekly) -def get_area_dwell_time_weekly_report( - areas: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today())): - """ - Returns a weekly report (for the date range specified) with information about the ??? - detected in the areas . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. - - Report spans from `weeks*7 + 1` days ago to yesterday. - - Taking yesterday as the end of week. - - **Else:** - - Report spans from `from_Date` to `to_date`. - - Taking Sunday as the end of week - """ - return get_weekly_metric(AREAS, areas, DWELL_TIME, from_date, to_date, weeks) - - -# Face Mask Metrics -@metrics_router.get("/face-mask-detections/live", response_model=FaceMaskLive) -def get_area_face_mask_detections_live(areas: str = ""): - """ - Returns a report with live information about the facemasks detected in the areas . - """ - return get_live_metric(AREAS, areas, FACEMASK_USAGE) - - -@metrics_router.get("/face-mask-detections/hourly", response_model=FaceMaskHourly) -def get_area_face_mask_detections_hourly_report(areas: str = "", date: date = Query(date.today())): - """ - Returns a hourly report (for the date specified) with information about the facemasks detected in - the cameras . - """ - return get_hourly_metric(AREAS, areas, FACEMASK_USAGE, date) - - -@metrics_router.get("/face-mask-detections/daily", response_model=FaceMaskDaily) -def get_area_face_mask_detections_daily_report(areas: str = "", - from_date: date = Query((date.today() - timedelta(days=3))), - to_date: date = Query(date.today())): - """ - Returns a daily report (for the date range specified) with information about the facemasks detected in - the cameras . - """ - return get_daily_metric(AREAS, areas, FACEMASK_USAGE, from_date, to_date) - - -@metrics_router.get("/face-mask-detections/weekly", response_model=FaceMaskWeekly) -def get_area_face_mask_detections_weekly_report( - areas: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today())): - """ - Returns a weekly report (for the date range specified) with information about the facemasks detected in - the cameras . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. - - Report spans from `weeks*7 + 1` days ago to yesterday. - - Taking yesterday as the end of week. - - **Else:** - - Report spans from `from_Date` to `to_date`. - - Taking Sunday as the end of week - """ - return get_weekly_metric(AREAS, areas, FACEMASK_USAGE, from_date, to_date, weeks) - - -# In Out Metrics -@metrics_router.get("/in-out/live", response_model=InOutLive) -def get_camera_in_out_live(areas: str = ""): - """ - Returns a report with live information about the in-out flow detected in the - cameras . 
- """ - return get_live_metric(AREAS, areas, IN_OUT) - - -@metrics_router.get("/in-out/hourly", response_model=InOutHourly) -def get_camera_in_out_hourly_report(areas: str = "", date: date = Query(date.today().isoformat())): - """ - Returns a hourly report (for the date specified) with information about the in-out flow detected in - the cameras . - """ - return get_hourly_metric(AREAS, areas, IN_OUT, date) - - -@metrics_router.get("/in-out/daily", response_model=InOutDaily) -def get_camera_in_out_daily_report( - areas: str = "", - from_date: date = Query((date.today() - timedelta(days=3)).isoformat()), - to_date: date = Query(date.today().isoformat())): - """ - Returns a daily report (for the date range specified) with information about the in-out flow detected in - the cameras . - """ - return get_daily_metric(AREAS, areas, IN_OUT, from_date, to_date) - - -@metrics_router.get("/in-out/weekly", response_model=InOutWeekly) -def get_camera_in_out_weekly_report( - areas: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4)).isoformat()), - to_date: date = Query(date.today().isoformat())): - """ - Returns a weekly report (for the date range specified) with information about the in-out flow detected in - the cameras . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. - - Report spans from `weeks*7 + 1` days ago to yesterday. - - Taking yesterday as the end of week. - - **Else:** - - Report spans from `from_Date` to `to_date`. - - Taking Sunday as the end of week - """ - return get_weekly_metric(AREAS, areas, IN_OUT, from_date, to_date, weeks) diff --git a/api/routers/metrics/metrics.py b/api/routers/metrics/metrics.py index 28657787..bb42bdef 100644 --- a/api/routers/metrics/metrics.py +++ b/api/routers/metrics/metrics.py @@ -5,8 +5,8 @@ from typing import Iterator from api.utils import bad_request_serializer, extract_config -from constants import AREAS, CAMERAS, FACEMASK_USAGE, OCCUPANCY, SOCIAL_DISTANCING, IN_OUT, ALL_AREAS, DWELL_TIME -from libs.metrics import FaceMaskUsageMetric, OccupancyMetric, SocialDistancingMetric, InOutMetric, DwellTimeMetric +from constants import CAMERAS, FACEMASK_USAGE, SOCIAL_DISTANCING, IN_OUT, DWELL_TIME +from libs.metrics import FaceMaskUsageMetric, SocialDistancingMetric, InOutMetric, DwellTimeMetric CAMERAS_METRICS = [SOCIAL_DISTANCING, FACEMASK_USAGE, IN_OUT] @@ -30,33 +30,6 @@ def validate_camera_existence(camera_id: str): raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Camera with id '{camera_id}' does not exist") -def get_areas(areas: str) -> Iterator[str]: - if ALL_AREAS in areas.upper().split(","): - return [ALL_AREAS] - if areas: - return areas.split(",") - config = extract_config(config_type=AREAS) - return [x["Id"] for x in config.values()] - - -def get_cameras_for_areas(areas: Iterator[str]) -> Iterator[str]: - if areas == [ALL_AREAS]: - return get_all_cameras() - config = extract_config(config_type=AREAS) - cameras = [] - for area_config in config.values(): - if area_config["Id"] in areas: - cameras.extend(area_config[CAMERAS.capitalize()].split(",")) - return cameras - - -def validate_area_existence(area_id: str): - if area_id != ALL_AREAS: - dir_path = os.path.join(os.getenv("AreaLogDirectory"), area_id, "occupancy_log") - if not os.path.exists(dir_path): - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Area with id '{area_id}' does not exist") - - def validate_dates(from_date: date, 
to_date: date): if from_date > to_date: raise HTTPException( @@ -68,7 +41,6 @@ def validate_dates(from_date: date, to_date: date): ) ) - def get_entities(entity: str, entities_ids: str, metric: str): entities = [] if entity == CAMERAS: @@ -77,14 +49,7 @@ def get_entities(entity: str, entities_ids: str, metric: str): validate_camera_existence(e) else: # entities == AREAS - entities = get_areas(entities_ids) - for e in entities: - validate_area_existence(e) - if entity == AREAS and metric in CAMERAS_METRICS: - entities = get_cameras_for_areas(entities) - if ALL_AREAS in entities_ids.upper() and entity == AREAS and metric == OCCUPANCY: - # Occupancy is not a CAMERA_METRIC, it is an AREA_METRIC. So we have to return a list with all area ids. - entities = get_areas("") + raise NotImplementedError return entities @@ -95,8 +60,6 @@ def get_metric_class(metric: str): return DwellTimeMetric elif metric == FACEMASK_USAGE: return FaceMaskUsageMetric - elif metric == OCCUPANCY: - return OccupancyMetric elif metric == IN_OUT: return InOutMetric else: diff --git a/api/tests/app/test_area_all.py b/api/tests/app/test_area_all.py deleted file mode 100644 index d6d2c050..00000000 --- a/api/tests/app/test_area_all.py +++ /dev/null @@ -1,291 +0,0 @@ -import pytest -import re -import os -import json -from copy import deepcopy - -from starlette.exceptions import HTTPException -from fastapi import status - -from api.models.occupancy_rule import OccupancyRuleListDTO -from api.tests.utils.common_functions import get_config_file_json -from constants import ALL_AREAS -from libs.utils import config as config_utils -from api.utils import get_config -# The line below is absolutely necessary. Fixtures are passed as arguments to test functions. That is why IDE could -# not recognized them. -from api.tests.utils.fixtures_tests import config_rollback_areas, rollback_area_all_json - - -def get_area_occupancy_rules(area_id): - config = get_config() - areas = config.get_areas() - area = next((area for area in areas if area.id == area_id), None) - if not area: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The area: {area_id} does not exist") - area_config_path = area.get_config_path() - - if not os.path.exists(area_config_path): - return OccupancyRuleListDTO.parse_obj([]).__root__ - - with open(area_config_path, "r") as area_file: - rules_data = json.load(area_file) - return OccupancyRuleListDTO.from_store_json(rules_data) - - -def to_boolean_if_possible(dictionary): - result = {} - for key, value in dictionary.items(): - if value in ["false", "true", "False", "True"]: - if value in ["false", "False"]: - value = False - else: - value = True - result[key] = value - return result - - -def expected_response(config_sample_path): - config_directory = config_utils.get_area_config_directory(get_config()) - config_path = os.path.join(config_directory, ALL_AREAS + ".json") - - with open(config_path, "r") as file: - json_content_from_file = json.load(file)["global_area_all"] - - response = {re.sub(r'(? is used: returned response will separate weeks in range taking a number of - weeks ago, considering the week ended yesterday. 
While when we use from_date and to_date: returned response - will separate weeks in range considering the week starts on Monday - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - weeks = 5 - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 0, 0, 0, 0, 0], 'average_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - 'max_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }), - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0, 0], - 'face_without_mask': [0, 0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }) - ] - ) - def test_get_a_weekly_report_no_data_for_input_range_of_dates(self, config_rollback_areas, metric, expected): - """ - Remember that whether is used: Separate weeks in range taking a number of weeks ago, considering the - week ended yesterday. While when we use from_date and to_date: Separate weeks in range considering the week - starts on Monday - """ - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - from_date = "2007-03-08" - to_date = "2007-04-11" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_only_from_date(self, config_rollback_areas, metric): - """ - Note that here as we do not send to_date, default value will take place, and to_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "2021-01-10" - "today". - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - from_date = "2021-01-10" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}") - - assert response.status_code == 200 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_only_to_date(self, config_rollback_areas, metric): - """ - Note that here as we do not send from_date, default value will take place, and from_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. 
- So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(), - weeks=4)" - "2020-09-20" and this date range is probably wrong because from_date will be later than to_date. - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - to_date = "2020-09-20" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_one_date_bad_format_I(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - from_date = "2007-03-08" - to_date = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_one_date_bad_format_II(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - from_date = "BAD_FORMAT" - to_date = "2007-03-08" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_both_dates_bad_format(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area["id"] - from_date = "2007-03-08" - to_date = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - # NO AREA - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [140, 290, 290, 290, 140], 'average_occupancy': [0.0, 63.43, 95.71, 87.57, 0.0], - 'max_occupancy': [0.0, 192.0, 235.0, 243.0, 0.0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("social-distancing", { - 'detected_objects': [20, 3170, 2786, 2854, 94], 'no_infringement': [4, 2135, 1882, 1875, 33], - 'low_infringement': [14, 480, 511, 474, 59], 'high_infringement': [2, 504, 300, 402, 2], - 'critical_infringement': [0, 57, 90, 100, 0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("face-mask-detections", { - 'no_face': [14, 719, 862, 571, 122], 'face_with_mask': [88, 2061, 1810, 1893, 91], - 'face_without_mask': [31, 864, 897, 909, 29], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }) - ] - ) - def test_get_a_weekly_report_properly_no_area(self, config_rollback_areas, metric, expected): - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", 
{ - 'occupancy_threshold': [0, 140, 290, 290, 290], 'average_occupancy': [0.0, 41.86, 78.86, 92.86, 75.0], - 'max_occupancy': [0.0, 156.0, 235.0, 243.0, 225.0], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("social-distancing", { - 'detected_objects': [121, 2352, 3019, 2818, 2961], 'no_infringement': [38, 1511, 2092, 1879, 1935], - 'low_infringement': [22, 437, 433, 518, 539], 'high_infringement': [53, 313, 443, 328, 384], - 'critical_infringement': [8, 88, 57, 90, 100], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("face-mask-detections", { - 'no_face': [0, 648, 626, 940, 625], 'face_with_mask': [0, 1373, 2128, 1817, 1858], - 'face_without_mask': [0, 878, 895, 807, 892], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }) - ] - ) - @freeze_time("2020-09-30") - def test_get_a_weekly_report_properly_weeks_no_area(self, config_rollback_areas, metric, expected): - """ Remember that time was mocked to 2020-09-30. """ - - area, area_2, client, config_sample_path = config_rollback_areas - weeks = 5 - - response = client.get(f"/metrics/areas/{metric}/weekly?&weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 140, 290, 290, 290], 'average_occupancy': [0.0, 41.86, 78.86, 92.86, 75.0], - 'max_occupancy': [0.0, 156.0, 235.0, 243.0, 225.0], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("social-distancing", { - 'detected_objects': [121, 2352, 3019, 2818, 2961], 'no_infringement': [38, 1511, 2092, 1879, 1935], - 'low_infringement': [22, 437, 433, 518, 539], 'high_infringement': [53, 313, 443, 328, 384], - 'critical_infringement': [8, 88, 57, 90, 100], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("face-mask-detections", { - 'no_face': [0, 648, 626, 940, 625], 'face_with_mask': [0, 1373, 2128, 1817, 1858], - 'face_without_mask': [0, 878, 895, 807, 892], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }) - ] - ) - @freeze_time("2020-09-30") - def test_get_a_weekly_report_properly_weeks_and_valid_ranges_of_dates_no_area(self, config_rollback_areas, metric, - expected): - """ - Remember that time was mocked to 2020-09-30. - In addition, note that from_date and to_date should be ignored accordingly with the description. 
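
# A quick sketch of the precedence rule the tests above pin down: when
# `weeks` is sent together with `from_date`/`to_date`, the explicit dates are
# ignored. The resolver below is illustrative only, not the API's actual code.
from datetime import date, timedelta
from typing import Optional, Tuple

def resolve_report_window(weeks: Optional[int], from_date: Optional[date],
                          to_date: Optional[date], today: date) -> Tuple[date, date]:
    if weeks:
        # `weeks` wins: N seven-day windows counted back from yesterday.
        end = today - timedelta(days=1)
        return end - timedelta(weeks=weeks) + timedelta(days=1), end
    return from_date, to_date

# resolve_report_window(5, date(2020, 9, 6), date(2020, 9, 28), date(2020, 9, 30))
# -> (date(2020, 8, 26), date(2020, 9, 29)), matching the mocked "2020-09-30"
# expectations above, where the date range is ignored in favour of weeks=5.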
- """ - - area, area_2, client, config_sample_path = config_rollback_areas - weeks = 5 - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get( - f"/metrics/areas/{metric}/weekly?&weeks={weeks}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_bad_format_weeks_no_area(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - weeks = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?weeks={weeks}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_bad_format_weeks_and_valid_range_of_dates_no_area(self, config_rollback_areas, - metric): - area, area_2, client, config_sample_path = config_rollback_areas - weeks = "BAD_FORMAT" - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get( - f"/metrics/areas/{metric}/weekly?weeks={weeks}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_from_date_after_to_date_no_area(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "2020-09-17" - to_date = "2020-09-15" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 0, 0, 0, 0], 'average_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0], - 'max_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0], - 'weeks': ['2007-03-08 2007-03-14', '2007-03-15 2007-03-21', '2007-03-22 2007-03-28', - '2007-03-29 2007-04-04', '2007-04-05 2007-04-11'] - }), - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-14', '2007-03-15 2007-03-21', '2007-03-22 2007-03-28', - '2007-03-29 2007-04-04', '2007-04-05 2007-04-11'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0], 'face_without_mask': [0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-14', '2007-03-15 2007-03-21', '2007-03-22 2007-03-28', - '2007-03-29 2007-04-04', '2007-04-05 2007-04-11'] - }) - ] - ) - @freeze_time("2007-04-12") - def test_get_a_weekly_report_no_data_for_input_weeks_no_area(self, config_rollback_areas, metric, - expected): - """ - Remember that time was mocked to 2007-04-12. - Note that there is no data for given input. - - Remember that whether is used: returned response will separate weeks in range taking a number of - weeks ago, considering the week ended yesterday. 
While when we use from_date and to_date: returned response - will separate weeks in range considering the week starts on Monday - """ - - area, area_2, client, config_sample_path = config_rollback_areas - weeks = 5 - - response = client.get(f"/metrics/areas/{metric}/weekly?weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 0, 0, 0, 0, 0], 'average_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - 'max_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }), - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0, 0], - 'face_without_mask': [0, 0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }) - ] - ) - def test_get_a_weekly_report_no_data_for_input_range_of_dates_no_area(self, config_rollback_areas, metric, - expected): - """ - Remember that whether is used: Separate weeks in range taking a number of weeks ago, considering the - week ended yesterday. While when we use from_date and to_date: Separate weeks in range considering the week - starts on Monday - """ - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "2007-03-08" - to_date = "2007-04-11" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_only_from_date_no_area(self, config_rollback_areas, metric): - """ - Note that here as we do not send to_date, default value will take place, and to_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "2021-01-10" - "today". - """ - - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "2021-01-10" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}") - - assert response.status_code == 200 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_only_to_date_no_area(self, config_rollback_areas, metric): - """ - Note that here as we do not send from_date, default value will take place, and from_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(), - weeks=4)" - "2020-09-20" and this date range is probably wrong because from_date will be later than to_date. 
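
# The docstrings in this file describe two week-grouping modes: with ?weeks=N
# the report takes N seven-day windows ending yesterday, while with an explicit
# date range weeks are calendar weeks starting on Monday, clipped at the edges.
# A minimal, self-contained sketch of both behaviours; the helper names are
# illustrative, not the processor's actual functions.
from datetime import date, timedelta
from typing import List, Tuple

def weeks_by_lookback(today: date, weeks: int) -> List[Tuple[date, date]]:
    # ?weeks=N: N consecutive 7-day windows, the last one ending yesterday.
    end = today - timedelta(days=1)
    return [(end - timedelta(days=7 * i + 6), end - timedelta(days=7 * i))
            for i in reversed(range(weeks))]

def weeks_by_range(from_date: date, to_date: date) -> List[Tuple[date, date]]:
    # ?from_date=&to_date=: Monday-based calendar weeks, clipped to the range,
    # so the first and last buckets may be partial weeks.
    buckets, start = [], from_date
    while start <= to_date:
        week_end = min(start + timedelta(days=6 - start.weekday()), to_date)
        buckets.append((start, week_end))
        start = week_end + timedelta(days=1)
    return buckets

# weeks_by_lookback(date(2020, 9, 30), 5)[0]  -> 2020-08-26 .. 2020-09-01
# weeks_by_range(date(2020, 9, 6), date(2020, 9, 28))[0] -> 2020-09-06 .. 2020-09-06
# Both match the week labels asserted in the expected responses above.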
- """ - - area, area_2, client, config_sample_path = config_rollback_areas - to_date = "2020-09-20" - - response = client.get(f"/metrics/areas/{metric}/weekly?to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_one_date_bad_format_I_no_area(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "2007-03-08" - to_date = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_one_date_bad_format_II_no_area(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "BAD_FORMAT" - to_date = "2007-03-08" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_both_dates_bad_format_no_area(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - from_date = "2007-03-08" - to_date = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - # SEVERAL AREAS - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [140, 290, 290, 290, 140], - 'average_occupancy': [0.0, 63.43, 95.71, 87.57, 0.0], - 'max_occupancy': [0.0, 192.0, 235.0, 243.0, 0.0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("social-distancing", { - 'detected_objects': [20, 3170, 2786, 2854, 94], 'no_infringement': [4, 2135, 1882, 1875, 33], - 'low_infringement': [14, 480, 511, 474, 59], 'high_infringement': [2, 504, 300, 402, 2], - 'critical_infringement': [0, 57, 90, 100, 0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("face-mask-detections", { - 'no_face': [14, 719, 862, 571, 122], 'face_with_mask': [88, 2061, 1810, 1893, 91], - 'face_without_mask': [31, 864, 897, 909, 29], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }) - ] - ) - def test_get_a_weekly_report_properly_two_areas(self, config_rollback_areas, metric, expected): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 140, 290, 290, 290], - 'average_occupancy': [0.0, 41.86, 78.86, 92.86, 75.0], - 'max_occupancy': [0.0, 156.0, 235.0, 243.0, 225.0], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - 
'2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("social-distancing", { - 'detected_objects': [121, 2352, 3019, 2818, 2961], 'no_infringement': [38, 1511, 2092, 1879, 1935], - 'low_infringement': [22, 437, 433, 518, 539], 'high_infringement': [53, 313, 443, 328, 384], - 'critical_infringement': [8, 88, 57, 90, 100], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("face-mask-detections", { - 'no_face': [0, 648, 626, 940, 625], 'face_with_mask': [0, 1373, 2128, 1817, 1858], - 'face_without_mask': [0, 878, 895, 807, 892], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }) - ] - ) - @freeze_time("2020-09-30") - def test_get_a_weekly_report_properly_weeks_two_areas(self, config_rollback_areas, metric, expected): - """ Remember that time was mocked to 2020-09-30. """ - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - weeks = 5 - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 140, 290, 290, 290], - 'average_occupancy': [0.0, 41.86, 78.86, 92.86, 75.0], - 'max_occupancy': [0.0, 156.0, 235.0, 243.0, 225.0], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("social-distancing", { - 'detected_objects': [121, 2352, 3019, 2818, 2961], 'no_infringement': [38, 1511, 2092, 1879, 1935], - 'low_infringement': [22, 437, 433, 518, 539], 'high_infringement': [53, 313, 443, 328, 384], - 'critical_infringement': [8, 88, 57, 90, 100], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("face-mask-detections", { - 'no_face': [0, 648, 626, 940, 625], 'face_with_mask': [0, 1373, 2128, 1817, 1858], - 'face_without_mask': [0, 878, 895, 807, 892], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }) - ] - ) - @freeze_time("2020-09-30") - def test_get_a_weekly_report_properly_weeks_and_valid_ranges_of_dates_two_areas(self, config_rollback_areas, - metric, - expected): - """ - Remember that time was mocked to 2020-09-30. - In addition, note that from_date and to_date should be ignored accordingly with the description. 
- """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - weeks = 5 - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get( - f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&weeks={weeks}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_bad_format_weeks_two_areas(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - weeks = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&weeks={weeks}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_bad_format_weeks_and_valid_range_of_dates_two_areas(self, config_rollback_areas, - metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - weeks = "BAD_FORMAT" - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get( - f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&weeks={weeks}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_from_date_after_to_date_two_areas(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "2020-09-17" - to_date = "2020-09-15" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 0, 0, 0, 0], 'average_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0], - 'max_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0], - 'weeks': ['2007-03-08 2007-03-14', '2007-03-15 2007-03-21', '2007-03-22 2007-03-28', - '2007-03-29 2007-04-04', '2007-04-05 2007-04-11'] - }), - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-14', '2007-03-15 2007-03-21', '2007-03-22 2007-03-28', - '2007-03-29 2007-04-04', '2007-04-05 2007-04-11'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0], 'face_without_mask': [0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-14', '2007-03-15 2007-03-21', '2007-03-22 2007-03-28', - '2007-03-29 2007-04-04', '2007-04-05 2007-04-11'] - }) - ] - ) - @freeze_time("2007-04-12") - def test_get_a_weekly_report_no_data_for_input_weeks_two_areas(self, config_rollback_areas, metric, - expected): - """ - Remember that time was mocked to 2007-04-12. - Note that there is no data for given input. - - Remember that whether is used: returned response will separate weeks in range taking a number of - weeks ago, considering the week ended yesterday. 
While when we use from_date and to_date: returned response - will separate weeks in range considering the week starts on Monday - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - weeks = 5 - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 0, 0, 0, 0, 0], 'average_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - 'max_occupancy': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }), - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0, 0], - 'face_without_mask': [0, 0, 0, 0, 0, 0], - 'weeks': ['2007-03-08 2007-03-11', '2007-03-12 2007-03-18', '2007-03-19 2007-03-25', - '2007-03-26 2007-04-01', '2007-04-02 2007-04-08', '2007-04-09 2007-04-11'] - }) - ] - ) - def test_get_a_weekly_report_no_data_for_input_range_of_dates_two_areas(self, config_rollback_areas, metric, - expected): - """ - Remember that whether is used: Separate weeks in range taking a number of weeks ago, considering the - week ended yesterday. While when we use from_date and to_date: Separate weeks in range considering the week - starts on Monday - """ - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "2007-03-08" - to_date = "2007-04-11" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_only_from_date_two_areas(self, config_rollback_areas, metric): - """ - Note that here as we do not send to_date, default value will take place, and to_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "2021-01-10" - "today". - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "2021-01-10" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}") - - assert response.status_code == 200 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_only_to_date_two_areas(self, config_rollback_areas, metric): - """ - Note that here as we do not send from_date, default value will take place, and from_date will be - date.today(). 
- WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(), - weeks=4)" - "2020-09-20" and this date range is probably wrong because from_date will be later than to_date. - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - to_date = "2020-09-20" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_one_date_bad_format_I_two_areas(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "2007-03-08" - to_date = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_one_date_bad_format_II_two_areas(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "BAD_FORMAT" - to_date = "2007-03-08" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["occupancy", "social-distancing", "face-mask-detections"] - ) - def test_try_get_a_weekly_report_both_dates_bad_format_two_areas(self, config_rollback_areas, metric): - area, area_2, client, config_sample_path = config_rollback_areas - area_id_1 = area["id"] - area_id_2 = area_2["id"] - from_date = "2007-03-08" - to_date = "BAD_FORMAT" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id_1},{area_id_2}&from_date={from_date}" - f"&to_date={to_date}") - - assert response.status_code == 400 - - # Non existent area - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", {'detail': "Area with id 'BAD_ID' does not exist"}), - ("social-distancing", {'detail': "Area with id 'BAD_ID' does not exist"}), - ("face-mask-detections", {'detail': "Area with id 'BAD_ID' does not exist"}) - ] - ) - def test_try_get_a_weekly_report_bad_area_id(self, config_rollback_areas, metric, expected): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = "BAD_ID" - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 404 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", {'detail': "Area with id 'BAD_ID' does not exist"}), - ("social-distancing", {'detail': "Area with id 'BAD_ID' does not exist"}), - ("face-mask-detections", {'detail': "Area with id 'BAD_ID' does not exist"}) - ] - ) - @freeze_time("2020-09-30") - def test_try_get_a_weekly_report_weeks_bad_area_id(self, config_rollback_areas, metric, expected): - """ Remember that time was mocked to 2020-09-30. 
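
# The "Non existent area" cases in this file expect a 404 whose detail reads
# "Area with id 'BAD_ID' does not exist". A minimal sketch of the kind of
# existence check that produces that response; the helper name and the
# `known_ids` argument are illustrative, not the router's actual signature.
from fastapi import HTTPException, status

def validate_area_existence(area_id: str, known_ids: set) -> None:
    if area_id not in known_ids:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Area with id '{area_id}' does not exist")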
""" - - area, area_2, client, config_sample_path = config_rollback_areas - area_id = "BAD_ID" - weeks = 5 - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&weeks={weeks}") - - assert response.status_code == 404 - assert response.json() == expected - - # Area = "ALL" - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [0, 140, 290, 290, 290], - 'average_occupancy': [0.0, 41.86, 78.86, 92.86, 75.0], - 'max_occupancy': [0.0, 156.0, 235.0, 243.0, 225.0], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("social-distancing", { - 'detected_objects': [121, 2352, 3019, 2818, 2961], 'no_infringement': [38, 1511, 2092, 1879, 1935], - 'low_infringement': [22, 437, 433, 518, 539], 'high_infringement': [53, 313, 443, 328, 384], - 'critical_infringement': [8, 88, 57, 90, 100], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }), - ("face-mask-detections", { - 'no_face': [0, 648, 626, 940, 625], 'face_with_mask': [0, 1373, 2128, 1817, 1858], - 'face_without_mask': [0, 878, 895, 807, 892], - 'weeks': ['2020-08-26 2020-09-01', '2020-09-02 2020-09-08', '2020-09-09 2020-09-15', - '2020-09-16 2020-09-22', '2020-09-23 2020-09-29'] - }) - ] - ) - @freeze_time("2020-09-30") - def test_get_a_weekly_report_properly_weeks_and_valid_ranges_of_dates_id_all(self, config_rollback_areas, - metric, - expected): - """ - Remember that time was mocked to 2020-09-30. - """ - - area, area_2, client, config_sample_path = config_rollback_areas - area_id = "ALL" - weeks = 5 - - response = client.get( - f"/metrics/areas/{metric}/weekly?areas={area_id}&weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [140, 290, 290, 290, 140], - 'average_occupancy': [0.0, 63.43, 95.71, 87.57, 0.0], - 'max_occupancy': [0.0, 192.0, 235.0, 243.0, 0.0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("social-distancing", { - 'detected_objects': [20, 3170, 2786, 2854, 94], 'no_infringement': [4, 2135, 1882, 1875, 33], - 'low_infringement': [14, 480, 511, 474, 59], 'high_infringement': [2, 504, 300, 402, 2], - 'critical_infringement': [0, 57, 90, 100, 0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("face-mask-detections", { - 'no_face': [14, 719, 862, 571, 122], 'face_with_mask': [88, 2061, 1810, 1893, 91], - 'face_without_mask': [31, 864, 897, 909, 29], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }) - ] - ) - def test_get_a_weekly_report_properly_id_all(self, config_rollback_areas, metric, expected): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = "ALL" - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("occupancy", { - 'occupancy_threshold': [140, 290, 290, 290, 140], - 
'average_occupancy': [0.0, 63.43, 95.71, 87.57, 0.0], - 'max_occupancy': [0.0, 192.0, 235.0, 243.0, 0.0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("social-distancing", { - 'detected_objects': [20, 3170, 2786, 2854, 94], 'no_infringement': [4, 2135, 1882, 1875, 33], - 'low_infringement': [14, 480, 511, 474, 59], 'high_infringement': [2, 504, 300, 402, 2], - 'critical_infringement': [0, 57, 90, 100, 0], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }), - ("face-mask-detections", { - 'no_face': [14, 719, 862, 571, 122], 'face_with_mask': [88, 2061, 1810, 1893, 91], - 'face_without_mask': [31, 864, 897, 909, 29], - 'weeks': ['2020-09-06 2020-09-06', '2020-09-07 2020-09-13', '2020-09-14 2020-09-20', - '2020-09-21 2020-09-27', '2020-09-28 2020-09-28'] - }) - ] - ) - def test_get_a_weekly_report_properly_ids_all_and_another_valid(self, config_rollback_areas, metric, expected): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = "5,aLl" - from_date = "2020-09-06" - to_date = "2020-09-28" - - response = client.get(f"/metrics/areas/{metric}/weekly?areas={area_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected diff --git a/api/tests/app/test_area_occupancy_rules.py b/api/tests/app/test_area_occupancy_rules.py deleted file mode 100644 index d207fa91..00000000 --- a/api/tests/app/test_area_occupancy_rules.py +++ /dev/null @@ -1,211 +0,0 @@ -import pytest -import os -from copy import deepcopy - -from api.utils import get_config -from libs.utils import config as config_utils - -# The line below is absolutely necessary. Fixtures are passed as arguments to test functions. That is why IDE could -# not recognized them. 
-from api.tests.utils.fixtures_tests import config_rollback_areas, rollback_area_config_path - - -def rollback_area_config_file(area_id): - """area_id must be an string""" - config_directory = config_utils.get_area_config_directory(get_config()) - config_path = os.path.join(config_directory, area_id + ".json") - if os.path.exists(config_path): - os.remove(config_path) - - -# pytest -v api/tests/app/test_area_occupancy_rules.py::TestsOccupancyRules -class TestsOccupancyRules: - """ LIVE """ - """ Get Area Occupancy Rules, GET /areas/occupancy-rules/:id """ - """ Set Area Occupancy Rules, PUT /areas/occupancy-rules/:id """ - """ Delete Area Occupancy Rules, DELETE /areas/occupancy-rules/:id """ - - base_data = {"occupancy_rules": - [ - { - "days": [True, True, False, False, False, True, True], - "start_hour":"12:00", - "finish_hour":"00:00", - "max_occupancy":12 - }, { - "days": [True, True, False, False, False, True, True], - "start_hour":"00:00", - "finish_hour":"11:00", - "max_occupancy":11 - } - ], - "id": 5, - "name": "Kitchen", - "cameras": "0" - } - - def test_set_correct_area_occupancy_rules(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["id"] = area_id - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 200 - - def test_unitary_set_get_delete(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = 537 - - data = deepcopy(self.base_data) - data["id"] = area_id - post_response = client.post(f"/areas", json=data) - get_response1 = client.get(f"/areas/{area_id}") - data["occupancy_rules"][0]["max_occupancy"] = 100 - put_response = client.put(f"/areas/{area_id}", json=data) - get_response2 = client.get(f"/areas/{area_id}") - delete_response = client.delete(f"/areas/{area_id}") - get_response3 = client.get(f"/areas/{area_id}") - - assert post_response.status_code == 201 - assert put_response.status_code == 200 - assert get_response1.status_code == 200 - assert get_response2.status_code == 200 - assert delete_response.status_code == 204 - assert get_response3.status_code == 404 - - res1 = get_response1.json() - res2 = get_response2.json() - assert res1 != res2 - assert res1["occupancy_rules"][0]["max_occupancy"] == 12 - assert res2["occupancy_rules"][0]["max_occupancy"] == 100 - - rollback_area_config_file(str(area_id)) - - def test_get_not_found(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = 404 - - get_response = client.get(f"/areas/{area_id}") - - assert get_response.status_code == 404 - - def test_get_empty(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = 5 - - data = deepcopy(self.base_data) - data["id"] = area_id - data["occupancy_rules"] = [] - response = client.put(f"/areas/{area_id}", json=data) - get_response = client.get(f"/areas/{area_id}") - - assert get_response.status_code == 200 - assert get_response.json()["occupancy_rules"] == [] - - def test_set_invalid_threshold(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["max_occupancy"] = -1 - response = client.put(f"/areas/{area_id}", 
json=data) - - assert response.status_code == 400 - - def test_set_invalid_start_hour(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "24:60" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 - - def test_set_invalid_start_finish_hour(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "23:00" - data["occupancy_rules"][0]["finish_hour"] = "22:00" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 - - def test_set_overlap_complete(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "12:00" - data["occupancy_rules"][0]["finish_hour"] = "22:00" - data["occupancy_rules"][1]["start_hour"] = "14:00" - data["occupancy_rules"][1]["finish_hour"] = "20:00" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 - - def test_set_overlap_start(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "12:00" - data["occupancy_rules"][0]["finish_hour"] = "22:00" - data["occupancy_rules"][1]["start_hour"] = "10:00" - data["occupancy_rules"][1]["finish_hour"] = "20:00" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 - - def test_set_overlap_end(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "12:00" - data["occupancy_rules"][0]["finish_hour"] = "22:00" - data["occupancy_rules"][1]["start_hour"] = "20:00" - data["occupancy_rules"][1]["finish_hour"] = "23:00" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 - - def test_set_overlap_zero(self, config_rollback_areas): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "00:00" - data["occupancy_rules"][0]["finish_hour"] = "14:00" - data["occupancy_rules"][1]["start_hour"] = "13:00" - data["occupancy_rules"][1]["finish_hour"] = "00:00" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 - - def test_set_contiguous(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["start_hour"] = "12:00" - data["occupancy_rules"][0]["finish_hour"] = "22:00" - data["occupancy_rules"][1]["start_hour"] = "22:00" - data["occupancy_rules"][1]["finish_hour"] = "23:00" - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 200 - - def test_set_wrong_days(self, config_rollback_areas, rollback_area_config_path): - area, area_2, client, 
config_sample_path = config_rollback_areas - area_id = area['id'] - - data = deepcopy(self.base_data) - data["occupancy_rules"][0]["days"] = [True, False, True, False, True] # should be 7 - response = client.put(f"/areas/{area_id}", json=data) - - assert response.status_code == 400 diff --git a/api/tests/data/config-x86-openvino_EMPTY.ini b/api/tests/data/config-x86-openvino_EMPTY.ini index 58d5d2c6..0ab0c338 100644 --- a/api/tests/data/config-x86-openvino_EMPTY.ini +++ b/api/tests/data/config-x86-openvino_EMPTY.ini @@ -33,18 +33,6 @@ LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/api/tests/data/mocked_data/data/processor/config -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 - [Detector] ; Supported devices: Jetson , EdgeTPU, Dummy, x86 Device = x86 @@ -107,11 +95,6 @@ Authorization = TimeInterval = 0.5 Enabled = False -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini b/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini index 016ec53f..9dd538c7 100644 --- a/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini +++ b/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini @@ -33,18 +33,6 @@ LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/api/tests/data/mocked_data/data/processor/config -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 - [Detector] ; Supported devices: Jetson , EdgeTPU, Dummy, x86 Device = x86 @@ -107,11 +95,6 @@ Authorization = TimeInterval = 0.5 Enabled = False -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/api/tests/data/config-x86-openvino_METRICS.ini b/api/tests/data/config-x86-openvino_METRICS.ini index 627ffbf3..6c4c2cc4 100644 --- a/api/tests/data/config-x86-openvino_METRICS.ini +++ b/api/tests/data/config-x86-openvino_METRICS.ini @@ -97,11 +97,6 @@ Authorization = TimeInterval = 0.5 Enabled = False -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/areas -Enabled = True - [PeriodicTask_0] Name = metrics Enabled = False @@ -169,27 +164,3 @@ Tags = bedroom,living_room DistMethod = CenterPointsDistance LiveFeedEnabled = False -[Area_1] -ViolationThreshold = 100 -NotifyEveryMinutes = 15 -Emails = Michael@email.com,Sanz@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 11:38 -OccupancyThreshold = 300 -Id = 5 -Name = Kitchen -Cameras = 49,50 - -[Area_0] -ViolationThreshold = 100 -NotifyEveryMinutes = 15 -Emails = Michael@email.com,Sanz@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 02:12 -OccupancyThreshold = 300 -Id = 6 -Name = 
Kitchen -Cameras = 51,52 - diff --git a/api/tests/data/mocked_data/data/processor/config/areas/ALL.json b/api/tests/data/mocked_data/data/processor/config/areas/ALL.json deleted file mode 100644 index 92877c9c..00000000 --- a/api/tests/data/mocked_data/data/processor/config/areas/ALL.json +++ /dev/null @@ -1 +0,0 @@ -{"global_area_all": {"ViolationThreshold": 0, "NotifyEveryMinutes": 0, "Emails": "", "EnableSlackNotifications": false, "DailyReport": false, "DailyReportTime": "N/A", "OccupancyThreshold": 0, "Id": "ALL", "Name": "ALL"}} \ No newline at end of file diff --git a/api/tests/utils/example_models.py b/api/tests/utils/example_models.py index 92cbb55c..e43ca366 100644 --- a/api/tests/utils/example_models.py +++ b/api/tests/utils/example_models.py @@ -74,33 +74,3 @@ "dist_method": "CenterPointsDistance", "live_feed_enabled": False } - -area_example = { - "violation_threshold": 100, - "notify_every_minutes": 15, - "emails": "Michael@email.com,Sanz@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "11:38", - "occupancy_threshold": 300, - "id": "5", - "name": "Kitchen", - "cameras": "49,50" -} - - -area_example_2 = { - "violation_threshold": 100, - "notify_every_minutes": 15, - "emails": "Michael@email.com,Sanz@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "02:12", - "occupancy_threshold": 300, - "id": "6", - "name": "Kitchen", - "cameras": "51,52" -} - - - diff --git a/api/tests/utils/fixtures_tests.py b/api/tests/utils/fixtures_tests.py index af2cb258..9f3bac07 100644 --- a/api/tests/utils/fixtures_tests.py +++ b/api/tests/utils/fixtures_tests.py @@ -8,15 +8,13 @@ from fastapi.testclient import TestClient -from constants import ALL_AREAS from libs.config_engine import ConfigEngine from api.settings import Settings from api.tests.utils.common_functions import create_app_config from libs.utils import config as config_utils -from .example_models import camera_template, camera_example, camera_example_2, camera_example_3, camera_example_4,\ - area_example, area_example_2 +from .example_models import camera_template, camera_example, camera_example_2, camera_example_3, camera_example_4 from ...utils import get_config @@ -24,7 +22,7 @@ def config_rollback_base(option="JUST_CAMERAS"): original_path = "" if option == "EMPTY": """ - Empty template with no camera or area. + Empty template with no camera. 
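
# The fixtures in this file all follow the same copy-modify-delete rollback
# pattern around a checked-in template .ini: copy it to a temporary path,
# run the test against the copy, then delete the copy. A condensed sketch of
# that pattern, assuming the paths used elsewhere in this file; it is not the
# exact body of config_rollback_base.
import os
import shutil
import pytest

@pytest.fixture
def temporary_config():
    original = "/repo/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini"
    temporary = "/repo/api/tests/data/config-x86-openvino_TEMPORARY.ini"
    shutil.copyfile(original, temporary)  # never mutate the checked-in template
    yield temporary
    os.remove(temporary)  # roll back regardless of what the test changed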
""" original_path = "/repo/api/tests/data/config-x86-openvino_EMPTY.ini" elif option == "JUST_CAMERAS": @@ -36,11 +34,11 @@ def config_rollback_base(option="JUST_CAMERAS"): original_path = "/repo/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini" elif option == "METRICS": """ - Here there are charged 4 cameras and two areas: - camera_example (ID: 49), Area 5 - camera_example_2 (ID: 50), Area 5 - camera_example_3 (ID: 51), Area 6 - camera_example_4 (ID: 52), Area 6 + Here there are charged 4 cameras: + camera_example (ID: 49) + camera_example_2 (ID: 50) + camera_example_3 (ID: 51) + camera_example_4 (ID: 52) """ original_path = "/repo/api/tests/data/config-x86-openvino_METRICS.ini" config_sample_path_to_modify = "/repo/api/tests/data/config-x86-openvino_TEMPORARY.ini" @@ -86,13 +84,6 @@ def config_rollback(): os.remove(config_sample_path_to_modify) -@pytest.fixture -def config_rollback_areas(): - client, config_sample_path_to_modify = config_rollback_base(option="METRICS") - yield area_example, area_example_2, client, config_sample_path_to_modify - os.remove(config_sample_path_to_modify) - - @pytest.fixture def config_rollback_cameras(): client, config_sample_path_to_modify = config_rollback_base(option="JUST_CAMERAS") @@ -182,28 +173,3 @@ def heatmap_simulation(): yield None # Deletes everything shutil.rmtree(heatmap_directory) - -@pytest.fixture -def rollback_area_all_json(): - config_directory = config_utils.get_area_config_directory(get_config()) - config_path = os.path.join(config_directory, ALL_AREAS + ".json") - - try: - with open(config_path, "r") as file: - file_content = json.load(file) - except Exception: - yield False - else: - yield True - with open(config_path, "w") as file: - json.dump(file_content, file) - - -@pytest.fixture -def rollback_area_config_path(): - yield None - config_directory = config_utils.get_area_config_directory(get_config()) - for area_id in [area_example["id"], area_example_2["id"]]: - config_path = os.path.join(config_directory, area_id + ".json") - if os.path.exists(config_path): - os.remove(config_path) diff --git a/api/utils.py b/api/utils.py index 8b3ce4d5..60a31cb3 100644 --- a/api/utils.py +++ b/api/utils.py @@ -21,14 +21,10 @@ def extract_config(config_type="all"): sections = get_config().get_sections() if config_type == "cameras": sections = [x for x in sections if x.startswith("Source_")] - elif config_type == "areas": - sections = [x for x in sections if x.startswith("Area_")] elif config_type == "source_post_processors": sections = [x for x in sections if x.startswith("SourcePostProcessor_")] elif config_type == "source_loggers": sections = [x for x in sections if x.startswith("SourceLogger_")] - elif config_type == "area_loggers": - sections = [x for x in sections if x.startswith("AreaLogger_")] elif config_type == "periodic_tasks": sections = [x for x in sections if x.startswith("PeriodicTask_")] config = {} @@ -87,17 +83,6 @@ def handle_response(response, success, status_code=status.HTTP_200_OK, decameliz return JSONResponse(status_code=status_code, content=content) -def reestructure_areas(config_dict): - """Ensure that all [Area_0, Area_1, ...] 
are consecutive""" - area_names = [x for x in config_dict.keys() if x.startswith("Area_")] - area_names.sort() - for index, area_name in enumerate(area_names): - if f"Area_{index}" != area_name: - config_dict[f"Area_{index}"] = config_dict[area_name] - config_dict.pop(area_name) - return config_dict - - def clean_up_file(filename): if os.path.exists(filename): if os.path.isdir(filename): diff --git a/config-coral.ini b/config-coral.ini index 4124f801..b511a836 100644 --- a/config-coral.ini +++ b/config-coral.ini @@ -17,7 +17,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True [API] Host = 0.0.0.0 @@ -32,18 +31,6 @@ Host = 0.0.0.0 QueuePort = 8010 QueueAuthKey = shibalba -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 - [Source_0] VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen @@ -140,11 +127,6 @@ TimeInterval = 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/config-jetson-nano.ini b/config-jetson-nano.ini index fd74862a..8a7146f2 100644 --- a/config-jetson-nano.ini +++ b/config-jetson-nano.ini @@ -20,7 +20,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True [API] Host = 0.0.0.0 @@ -35,18 +34,6 @@ Host = 0.0.0.0 QueuePort = 8010 QueueAuthKey = shibalba -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 - [Source_0] VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen @@ -131,11 +118,6 @@ TimeInterval = 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/config-jetson-tx2.ini b/config-jetson-tx2.ini index 014c8b7f..2c62cc2f 100644 --- a/config-jetson-tx2.ini +++ b/config-jetson-tx2.ini @@ -20,7 +20,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True [API] Host = 0.0.0.0 @@ -35,18 +34,6 @@ Host = 0.0.0.0 QueuePort = 8010 QueueAuthKey = shibalba -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 - [Source_0] VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen @@ -141,11 +128,6 @@ TimeInterval = 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/config-x86-gpu-tensorrt.ini 
b/config-x86-gpu-tensorrt.ini index 4c7465a6..36e36565 100644 --- a/config-x86-gpu-tensorrt.ini +++ b/config-x86-gpu-tensorrt.ini @@ -34,19 +34,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True - -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 [Source_0] VideoPath = /repo/data/softbio_vid.mp4 @@ -143,11 +130,6 @@ TimeInterval = 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/config-x86-gpu.ini b/config-x86-gpu.ini index f9c20259..44274a7e 100644 --- a/config-x86-gpu.ini +++ b/config-x86-gpu.ini @@ -5,7 +5,6 @@ UseAuthToken = False SSLEnabled = False SSLCertificateFile = SSLKeyFile = -PorcessAreas = True [CORE] Host = 0.0.0.0 @@ -32,19 +31,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True - -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 [Source_0] VideoPath = /repo/data/softbio_vid.mp4 @@ -141,11 +127,6 @@ TimeInterval = 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini index e7daee9c..7fe822e3 100644 --- a/config-x86-openvino.ini +++ b/config-x86-openvino.ini @@ -32,19 +32,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True - -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 [Source_0] VideoPath = /repo/data/softbio_vid.mp4 @@ -128,11 +115,6 @@ TimeInterval = 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/config-x86.ini b/config-x86.ini index abbeefde..9e9ea4e8 100644 --- a/config-x86.ini +++ b/config-x86.ini @@ -33,19 +33,6 @@ HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True - -[Area_0] -Id = 0 -Name = Kitchen -Cameras = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -OccupancyThreshold = 300 -ViolationThreshold = 60 -DailyReport = False -DailyReportTime = 06:00 [Source_0] VideoPath = /repo/data/softbio_vid.mp4 @@ -143,11 +130,6 @@ TimeInterval 
= 0.5 Enabled = False SendingInterval = 5 -[AreaLogger_0] -Name = file_system_logger -LogDirectory = /repo/data/processor/static/data/areas -Enabled = True - ; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics diff --git a/constants.py b/constants.py index 853b1eeb..2166a806 100644 --- a/constants.py +++ b/constants.py @@ -1,9 +1,7 @@ PROCESSOR_VERSION = "0.7.0" # Entities -AREAS = "areas" CAMERAS = "cameras" -ALL_AREAS = "ALL" # Metrics OCCUPANCY = "occupancy" diff --git a/libs/area_engine.py b/libs/area_engine.py deleted file mode 100755 index 3ba9537d..00000000 --- a/libs/area_engine.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -import time -import logging -import csv - -from datetime import date, datetime -from collections import deque - -from libs.config_engine import ConfigEngine -from libs.loggers.area_loggers.logger import Logger -from libs.entities.area import Area -from .utils.loggers import get_source_log_directory, get_source_logging_interval -from .utils.mailing import MailService -from .notifications.slack_notifications import SlackService - -logger = logging.getLogger(__name__) - - -class AreaEngine: - - def __init__(self, config: ConfigEngine, area: Area): - self.processing_alerts = False - self.config = config - self.area = area - - self.occupancy_sleep_time_interval = float(self.config.get_section_dict("App")["OccupancyAlertsMinInterval"]) - self.log_dir = get_source_log_directory(config) - self.idle_time = get_source_logging_interval(config) - self.area_id = self.area.id - self.area_name = self.area.name - self.should_send_email_notifications = self.area.should_send_email_notifications - self.should_send_slack_notifications = self.area.should_send_slack_notifications - self.cameras = [camera for camera in self.config.get_video_sources() if camera["id"] in self.area.cameras] - for camera in self.cameras: - camera.file_path = os.path.join(self.log_dir, camera["id"], "objects_log") - camera.last_processed_time = time.time() - - if self.should_send_email_notifications: - self.mail_service = MailService(config) - if self.should_send_slack_notifications: - self.slack_service = SlackService(config) - - self.last_notification_time = 0 - - self.loggers = [] - loggers_names = [x for x in self.config.get_sections() if x.startswith("AreaLogger_")] - for l_name in loggers_names: - if self.config.get_boolean(l_name, "Enabled"): - self.loggers.append(Logger(self.config, area.section, l_name)) - - def process_area(self): - # Sleep for a while so cameras start processing - time.sleep(15) - - self.processing_area = True - logger.info(f"Enabled processing area - {self.area_id}: {self.area_name} with {len(self.cameras)} cameras") - while self.processing_area: - camera_file_paths = [os.path.join(camera.file_path, str(date.today()) + ".csv") for camera in self.cameras] - if not all(list(map(os.path.isfile, camera_file_paths))): - # Wait before csv for this day are created - logger.info(f"Area reporting on - {self.area_id}: {self.area_name} is waiting for reports to be created") - time.sleep(5) - else: - occupancy = 0 - active_cameras = [] - for camera in self.cameras: - with open(os.path.join(camera.file_path, str(date.today()) + ".csv"), "r") as log: - last_log = deque(csv.DictReader(log), 1)[0] - log_time = datetime.strptime(last_log["Timestamp"], "%Y-%m-%d %H:%M:%S") - # TODO: If the TimeInterval of the Logger is more than 30 seconds this would have to be revised. 
- if (datetime.now() - log_time).total_seconds() < 30: - occupancy += int(last_log["DetectedObjects"]) - active_cameras.append({"camera_id": camera.id, "camera_name": camera.name}) - else: - logger.warn(f"Logs aren't being updated for camera {camera.id} - {camera.name}") - - for l in self.loggers: - l.update(active_cameras, {"occupancy": occupancy}) - - threshold = self.area.get_occupancy_threshold(datetime.now()) - if (occupancy > threshold - and time.time() - self.last_notification_time > self.occupancy_sleep_time_interval): - # Trigger alerts - self.last_notification_time = time.time() - if self.should_send_email_notifications: - self.mail_service.send_occupancy_notification(self.area, occupancy, threshold) - if self.should_send_slack_notifications: - self.slack_service.occupancy_alert(self.area, occupancy, threshold) - # Sleep until new data is logged - time.sleep(self.idle_time) - - self.stop_process_area() - - def stop_process_area(self): - logger.info(f"Disabled processing area - {self.area_id}: {self.area_name}") - self.processing_area = False diff --git a/libs/area_threading.py b/libs/area_threading.py deleted file mode 100644 index b738cfd0..00000000 --- a/libs/area_threading.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import logging - -import time - -from datetime import datetime -from threading import Thread -from libs.area_engine import AreaEngine - -logger = logging.getLogger(__name__) - - -def run_area_processing(config, pipe, areas): - pid = os.getpid() - logger.info(f"[{pid}] taking on notifications for {len(areas)} areas") - threads = [] - for area in areas: - engine = AreaThread(config, area) - engine.start() - threads.append(engine) - - # Wait for a signal to die - pipe.recv() - logger.info(f"[{pid}] will stop area alerts and die") - for t in threads: - t.stop() - - logger.info(f"[{pid}] Goodbye!") - - -class AreaThread(Thread): - def __init__(self, config, area): - Thread.__init__(self) - self.engine = None - self.config = config - self.area = area - - def run(self): - try: - self.engine = AreaEngine(self.config, self.area) - restarts = 0 - max_restarts = int(self.config.get_section_dict("App")["MaxThreadRestarts"]) - if not self.config.get_boolean("App", "ProcessAreas"): - # Ignore the area processing - return - while True: - try: - last_restart_time = datetime.now() - self.engine.process_area() - except Exception as e: - logging.error(e, exc_info=True) - logging.info(f"Exception processing area {self.area.name}") - if (datetime.now() - last_restart_time).total_seconds() > 60: - # If the last restart was previous than 1 minute ago, restart the counter. 
- restarts = 0 - if restarts == max_restarts: - raise e - # Sleep the thread for 5 seconds and try to process the area again - time.sleep(5) - logging.info("Restarting the area processing") - restarts += 1 - except Exception as e: - logging.error(e, exc_info=True) - raise e - - def stop(self): - self.engine.stop_process_area() - self.join() diff --git a/libs/backups/s3_backup.py b/libs/backups/s3_backup.py index f5a2c6b5..bd9ebd79 100644 --- a/libs/backups/s3_backup.py +++ b/libs/backups/s3_backup.py @@ -3,20 +3,18 @@ from datetime import date, timedelta from libs.config_engine import ConfigEngine -from libs.metrics import FaceMaskUsageMetric, OccupancyMetric, SocialDistancingMetric, DwellTimeMetric, InOutMetric +from libs.metrics import FaceMaskUsageMetric, SocialDistancingMetric, DwellTimeMetric, InOutMetric from libs.uploaders.s3_uploader import S3Uploader -from libs.utils.loggers import get_area_log_directory, get_source_log_directory +from libs.utils.loggers import get_source_log_directory def raw_data_backup(config: ConfigEngine, bucket_name: str): """ - Uploads into S3 the raw data generated by the cameras and the areas. + Uploads into S3 the raw data generated by the cameras. """ s3_uploader = S3Uploader() sources = config.get_video_sources() - areas = config.get_areas() source_log_directory = get_source_log_directory(config) - area_log_directory = get_area_log_directory(config) # Backup all the source files for src in sources: source_directory = os.path.join(source_log_directory, src["id"]) @@ -26,26 +24,14 @@ def raw_data_backup(config: ConfigEngine, bucket_name: str): if os.path.isfile(today_objects_csv): # Upload the today object files to S3 s3_uploader.upload_file(bucket_name, today_objects_csv, f"{str(date.today())}.csv", bucket_prefix) - # Backup all the area files - for area in areas: - area_directory = os.path.join(area_log_directory, area.id) - occupancy_log_directory = os.path.join(area_directory, "occupancy_log") - today_occupancy_csv = os.path.join(occupancy_log_directory, str(date.today()) + ".csv") - bucket_prefix = f"areas/{area.id}/occupancy_log" - if os.path.isfile(today_objects_csv): - # Upload the today occupancy files to S3 - s3_uploader.upload_file(bucket_name, today_occupancy_csv, f"{str(date.today())}.csv", bucket_prefix) - def reports_backup(config: ConfigEngine, bucket_name: str): """ - Uploads into s3 the reports generated yesterday by the cameras and the areas. + Uploads into s3 the reports generated yesterday by the cameras. 
""" s3_uploader = S3Uploader() sources = config.get_video_sources() - areas = config.get_areas() source_log_directory = get_source_log_directory(config) - area_log_directory = get_area_log_directory(config) yesterday = str(date.today() - timedelta(days=1)) # Backup the sources yesterday reports for src in sources: @@ -60,15 +46,4 @@ def reports_backup(config: ConfigEngine, bucket_name: str): if os.path.isfile(metric_hourly_report): s3_uploader.upload_file(bucket_name, metric_hourly_report, f"report_{yesterday}.csv", bucket_prefix) if os.path.isfile(metric_daily_report): - s3_uploader.upload_file(bucket_name, metric_daily_report, "report.csv", bucket_prefix) - # Backup the areas yesterday reports - for area in areas: - area_directory = os.path.join(area_log_directory, area.id) - occupancy_reports_directory = os.path.join(area_directory, "reports", OccupancyMetric.reports_folder) - occupancy_hourly_report = os.path.join(occupancy_reports_directory, f"report_{yesterday}.csv") - occupancy_daily_report = os.path.join(occupancy_reports_directory, "report.csv") - bucket_prefix = f"areas/{area.id}/reports/{OccupancyMetric.reports_folder}" - if os.path.isfile(occupancy_hourly_report): - s3_uploader.upload_file(bucket_name, occupancy_hourly_report, f"report_{yesterday}.csv", bucket_prefix) - if os.path.isfile(occupancy_daily_report): - s3_uploader.upload_file(bucket_name, occupancy_hourly_report, "report.csv", bucket_prefix) + s3_uploader.upload_file(bucket_name, metric_daily_report, "report.csv", bucket_prefix) \ No newline at end of file diff --git a/libs/config_engine.py b/libs/config_engine.py index 9fa5322f..df2e8568 100644 --- a/libs/config_engine.py +++ b/libs/config_engine.py @@ -4,12 +4,10 @@ import configparser import threading -from constants import ALL_AREAS from libs.notifications.slack_notifications import is_slack_configured from libs.utils.mailing import is_mailing_configured from libs.utils import config as config_utils -from libs.utils.loggers import get_area_log_directory, get_source_log_directory -from libs.entities.area import Area +from libs.utils.loggers import get_source_log_directory from libs.entities.video_source import VideoSource @@ -130,12 +128,12 @@ def set_option_in_section(self, section, option, value): def update_config(self, config, save_file=True): current_sections = [] for section, options in config.items(): - if section.startswith(("Source", "Area", "PeriodicTask")): + if section.startswith(("Source", "PeriodicTask")): current_sections.append(section) for option, value in options.items(): self.set_option_in_section(section, option, value) for section in self.config.sections(): - if (len(current_sections) and section.startswith(("Source", "Area", "PeriodicTask")) + if (len(current_sections) and section.startswith(("Source", "PeriodicTask")) and section not in current_sections): self.config.remove_section(section) self.set_option_in_section("App", "HasBeenConfigured", "True") @@ -159,36 +157,6 @@ def get_video_sources(self): # Sources are invalid in config file. What should we do? 
raise RuntimeError("Invalid sources in config file") - def get_areas(self): - try: - areas = [] - cameras_list = [] - is_slack_enabled = self.config["App"]["SlackChannel"] and is_slack_configured() - is_email_enabled = is_mailing_configured() - config_dir = config_utils.get_area_config_directory(self) - area_logs_dir = get_area_log_directory(self) - for title, section in self.config.items(): - if title.startswith("Area_"): - area = Area(section, title, is_email_enabled, is_slack_enabled, config_dir, area_logs_dir) - areas.append(area) - elif title.startswith("Source_"): - cameras_list.append(self.config[title]["Id"]) - cameras_string = ",".join(cameras_list) - areas.append(Area.set_global_area(is_email_enabled, is_slack_enabled, config_dir, area_logs_dir, - cameras_string)) - return areas - except Exception: - # Sources are invalid in config file. What should we do? - raise RuntimeError("Invalid areas in config file") - - def get_area_all(self): - areas = self.get_areas() - area_all = next(area for area in areas if area.id == ALL_AREAS) - return area_all - - def get_area_config_path(self, area_id): - return os.path.join(config_utils.get_area_config_directory(self), area_id + ".json") - def should_send_email_notifications(self, entity): if "emails" in entity: if is_mailing_configured(): diff --git a/libs/entities/area.py b/libs/entities/area.py deleted file mode 100644 index e57afb53..00000000 --- a/libs/entities/area.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import json -from datetime import datetime -import pathlib - -from constants import ALL_AREAS -from .base_entity import BaseEntity -from .occupancy_rule import OccupancyRule -from libs.utils.utils import validate_file_exists_and_is_not_empty - - -class Area(BaseEntity): - - def __init__(self, config_section: dict, section_title: str, send_email_enabled: bool, send_slack_enabled: bool, - config_dir: str, logs_dir: str): - super().__init__(config_section, section_title, send_email_enabled, send_slack_enabled, config_dir, logs_dir) - self.type = "Area" - self.occupancy_threshold = int(config_section["OccupancyThreshold"]) - if "Cameras" in config_section and config_section["Cameras"].strip() != "": - self.cameras = config_section["Cameras"].split(",") - - if (self.notify_every_minutes > 0 and self.violation_threshold > 0) or self.occupancy_threshold > 0: - self.should_send_email_notifications = send_email_enabled and self.emails != [] - self.should_send_slack_notifications = send_slack_enabled and self.enable_slack_notifications - else: - self.should_send_email_notifications = False - self.should_send_slack_notifications = False - self.load_occupancy_rules() - - @classmethod - def set_global_area(cls, is_email_enabled, is_slack_enabled, config_dir, area_logs_dir, cameras_list): - pathlib.Path(config_dir).mkdir(parents=True, exist_ok=True) - config_path = os.path.join(config_dir, "ALL.json") - json_content = { - "global_area_all": { - "ViolationThreshold": 0, - "NotifyEveryMinutes": 0, - "Emails": "", - "EnableSlackNotifications": False, # "N/A" - "DailyReport": False, # "N/A" - "DailyReportTime": "N/A", - "OccupancyThreshold": 0, - "Id": ALL_AREAS, - "Name": ALL_AREAS, - } - } - - if not os.path.exists(config_path): - # Create the file with if necessary - with open(config_path, 'x') as outfile: - json.dump(json_content, outfile) - section = json_content["global_area_all"] - else: - # If file exists, we have to check if there is a key named: "global_area_all". 
- with open(config_path, "r+") as file: - file_content = json.load(file) - - if file_content.get("global_area_all") is None: - file_content["global_area_all"] = json_content["global_area_all"] - json.dump(file_content, file) - section = json_content["global_area_all"] - else: - section = file_content.get("global_area_all") - - section["Cameras"] = cameras_list - title = ALL_AREAS - - return Area(section, title, is_email_enabled, is_slack_enabled, config_dir, area_logs_dir) - - def load_occupancy_rules(self): - self.occupancy_rules = [] - area_config_path = self.get_config_path() - if validate_file_exists_and_is_not_empty(area_config_path): - with open(area_config_path) as json_file: - area_config = json.load(json_file) - if "occupancy_rules" not in area_config: - return - for rule in area_config["occupancy_rules"]: - self.occupancy_rules.append(OccupancyRule(rule)) - - def get_occupancy_threshold(self, date: datetime): - return next((rule.occupancy_threshold for rule in self.occupancy_rules if rule.date_is_included(date)), - self.occupancy_threshold) - - def get_config_path(self): - return os.path.join(self.config_dir, self.id + ".json") diff --git a/libs/loggers/area_loggers/__init__.py b/libs/loggers/area_loggers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libs/loggers/area_loggers/file_system_logger.py b/libs/loggers/area_loggers/file_system_logger.py deleted file mode 100644 index cae1feaa..00000000 --- a/libs/loggers/area_loggers/file_system_logger.py +++ /dev/null @@ -1,36 +0,0 @@ -import csv -import os - -from datetime import date, datetime -from typing import List - -from constants import ALL_AREAS - - -class FileSystemLogger: - - def __init__(self, config, area: str, logger: str): - self.config = config - if area == ALL_AREAS: - self.area_id = ALL_AREAS - else: - self.area_id = self.config.get_section_dict(area)["Id"] - self.log_directory = config.get_section_dict(logger)["LogDirectory"] - self.occupancy_log_directory = os.path.join(self.log_directory, self.area_id, "occupancy_log") - os.makedirs(self.occupancy_log_directory, exist_ok=True) - self.submited_time = 0 - - def update(self, cameras: List[str], area_data: dict): - file_name = str(date.today()) - file_path = os.path.join(self.occupancy_log_directory, file_name + ".csv") - file_exists = os.path.isfile(file_path) - now = datetime.now() - current_time = now.strftime("%Y-%m-%d %H:%M:%S") - - with open(file_path, "a") as csvfile: - headers = ["Timestamp", "Cameras", "Occupancy"] - writer = csv.DictWriter(csvfile, fieldnames=headers) - if not file_exists: - writer.writeheader() - writer.writerow( - {"Timestamp": current_time, "Cameras": cameras, "Occupancy": area_data["occupancy"]}) diff --git a/libs/loggers/area_loggers/logger.py b/libs/loggers/area_loggers/logger.py deleted file mode 100644 index 5a03f10c..00000000 --- a/libs/loggers/area_loggers/logger.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import List - - -class Logger: - - def __init__(self, config, area: str, logger: str): - logger_name = config.get_section_dict(logger)["Name"] - self.logger = None - if logger_name == "file_system_logger": - from .file_system_logger import FileSystemLogger - self.logger = FileSystemLogger(config, area, logger) - else: - raise ValueError('Not supported logger named: ', logger_name) - - def update(self, cameras: List[str], area_data: dict): - self.logger.update(cameras, area_data) diff --git a/libs/metrics/__init__.py b/libs/metrics/__init__.py index 527afd3f..9eaa907c 100644 --- 
a/libs/metrics/__init__.py +++ b/libs/metrics/__init__.py @@ -1,5 +1,4 @@ from . social_distancing import SocialDistancingMetric # noqa from .face_mask_usage import FaceMaskUsageMetric # noqa -from .occupancy import OccupancyMetric # noqa from .in_out import InOutMetric # noqa from .dwell_time import DwellTimeMetric # noqa diff --git a/libs/metrics/base.py b/libs/metrics/base.py index 697f26c3..9a6b26dc 100644 --- a/libs/metrics/base.py +++ b/libs/metrics/base.py @@ -15,7 +15,7 @@ from pandas.api.types import is_numeric_dtype from libs.utils.config import get_source_config_directory -from libs.utils.loggers import get_source_log_directory, get_area_log_directory, get_source_logging_interval +from libs.utils.loggers import get_source_log_directory, get_source_logging_interval from libs.utils.utils import is_list_recursively_empty, validate_file_exists_and_is_not_empty logger = logging.getLogger(__name__) @@ -43,7 +43,6 @@ class BaseMetric: reports_folder = None csv_headers = [] csv_default_values = [] - # entity value can be "source" or "area" entity = "source" # Use the `live_csv_headers` when the csv strucutre differs from the hourly/daily live_csv_headers = [] @@ -56,9 +55,11 @@ def report_headers(cls): @classmethod def get_entity_base_directory(cls, config=None): + if cls.entity != "source": + raise NotImplementedError if config: - return get_source_log_directory(config) if cls.entity == "source" else get_area_log_directory(config) - return os.getenv("SourceLogDirectory") if cls.entity == "source" else os.getenv("AreaLogDirectory") + return get_source_log_directory(config) + return os.getenv("SourceLogDirectory") @classmethod def get_roi_file_path(cls, camera_id, config): @@ -76,7 +77,7 @@ def get_roi_contour(cls, roi_file_path): @classmethod def get_roi_contour_for_entity(cls, config, source_id): if cls.entity == "area": - return None + raise NotImplementedError return cls.get_roi_contour(cls.get_roi_file_path(source_id, config)) @staticmethod @@ -123,7 +124,9 @@ def ignore_objects_outside_roi(cls, csv_row, roi_contour): @classmethod def get_entities(cls, config): - return config.get_video_sources() if cls.entity == "source" else config.get_areas() + if cls.entity != "source": + raise NotImplementedError + return config.get_video_sources() @classmethod def process_metric_csv_row(cls, csv_row, object_logs): @@ -151,7 +154,9 @@ def generate_hourly_csv_data(cls, config, entity: Dict, entity_file: str, time_f time_until: datetime): roi_contour = cls.get_roi_contour_for_entity(config, entity["id"]) if not os.path.isfile(entity_file): - entity_type = "Camera" if cls.entity else "Area" + if not cls.entity: + raise NotImplementedError + entity_type = "Camera" logger.warn(f"The [{entity_type}: {entity['id']}] contains no recorded data for that day") return objects_logs = {} @@ -179,8 +184,7 @@ def compute_hourly_metrics(cls, config): if cls.entity == "source": log_directory = os.path.join(entity_directory, "objects_log") else: - # cls.entity == "area" - log_directory = os.path.join(entity_directory, "occupancy_log") + raise NotImplementedError reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) # Create missing directories os.makedirs(log_directory, exist_ok=True) @@ -201,7 +205,9 @@ def compute_hourly_metrics(cls, config): writer.writeheader() csv_data = cls.generate_hourly_csv_data(config, entity, entity_csv, time_from, time_until) if csv_data is None: - entity_type = "Camera" if cls.entity else "Area" + if not cls.entity: + raise NotImplementedError + 
entity_type = "Camera" logger.warn(f"Hourly report not generated! [{entity_type}: {entity['id']}]") continue with open(daily_csv, "a", newline='') as csvfile: @@ -234,7 +240,9 @@ def compute_daily_metrics(cls, config): hourly_csv = os.path.join(reports_directory, "report_" + yesterday + ".csv") report_csv = os.path.join(reports_directory, "report.csv") if not os.path.isfile(hourly_csv): - entity_type = "Camera" if cls.entity else "Area" + if not cls.entity: + raise NotImplementedError + entity_type = "Camera" logger.warn(f"Daily report for date {str(yesterday)} not generated! [{entity_type}: {entity['id']}]") continue daily_data = cls.generate_daily_csv_data(hourly_csv) @@ -272,8 +280,7 @@ def compute_live_metrics(cls, config, live_interval): if cls.entity == "source": log_directory = os.path.join(entity_directory, "objects_log") else: - # cls.entity == "area" - log_directory = os.path.join(entity_directory, "occupancy_log") + raise NotImplementedError today_entity_csv = os.path.join(log_directory, str(date.today()) + ".csv") live_report_csv = os.path.join(reports_directory, "live.csv") csv_headers = cls.live_csv_headers if cls.live_csv_headers else cls.csv_headers diff --git a/libs/metrics/occupancy.py b/libs/metrics/occupancy.py deleted file mode 100644 index 61c5dde9..00000000 --- a/libs/metrics/occupancy.py +++ /dev/null @@ -1,123 +0,0 @@ -import csv -import numpy as np -import os - -from collections import deque -from datetime import date, datetime -from statistics import mean -from typing import Dict, Iterator, List - -from .base import BaseMetric -from constants import OCCUPANCY - - -class OccupancyMetric(BaseMetric): - - reports_folder = OCCUPANCY - csv_headers = ["AverageOccupancy", "MaxOccupancy", "OccupancyThreshold"] - entity = "area" - live_csv_headers = ["AverageOccupancy", "MaxOccupancy", "OccupancyThreshold", "Violations"] - csv_default_values = [0, 0, 0, 0] - - @classmethod - def process_metric_csv_row(cls, csv_row: Dict, objects_logs: Dict): - row_time = datetime.strptime(csv_row["Timestamp"], "%Y-%m-%d %H:%M:%S") - row_hour = row_time.hour - if not objects_logs.get(row_hour): - objects_logs[row_hour] = {} - if not objects_logs[row_hour].get("Occupancy"): - objects_logs[row_hour]["Occupancy"] = [] - objects_logs[row_hour]["Occupancy"].append(int(csv_row["Occupancy"])) - - @classmethod - def generate_hourly_metric_data(cls, config, objects_logs, entity): - summary = np.zeros((len(objects_logs), 3), dtype=np.long) - now = datetime.now() - for index, hour in enumerate(sorted(objects_logs)): - start_hour_time = datetime(now.year, now.month, now.day, hour, 0) - end_hour_time = datetime(now.year, now.month, now.day, hour, 59) - occupancy_threshold = max(entity.get_occupancy_threshold(start_hour_time), - entity.get_occupancy_threshold(end_hour_time)) - summary[index] = ( - mean(objects_logs[hour].get("Occupancy", [0])), max(objects_logs[hour].get("Occupancy", [0])), - occupancy_threshold - ) - return summary - - @classmethod - def generate_daily_csv_data(cls, yesterday_hourly_file): - average_ocupancy = [] - max_occupancy = [] - threshold = 0 - with open(yesterday_hourly_file, newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - if int(row["AverageOccupancy"]): - average_ocupancy.append(int(row["AverageOccupancy"])) - max_occupancy.append(int(row["MaxOccupancy"])) - threshold = max(int(row["OccupancyThreshold"]), threshold) - if not average_ocupancy: - return 0, 0, threshold - return round(mean(average_ocupancy), 2), max(max_occupancy), threshold - - 
@classmethod - def generate_live_csv_data(cls, config, today_entity_csv, entity, entries_in_interval): - """ - Generates the live report using the `today_entity_csv` file received. - """ - - with open(today_entity_csv, "r") as log: - objects_logs = {} - last_entries = deque(csv.DictReader(log), entries_in_interval) - for entry in last_entries: - cls.process_csv_row(entry, objects_logs, None) - # Put the rows in the same hour - objects_logs_merged = { - 0: {"Occupancy": []} - } - for hour in objects_logs: - objects_logs_merged[0]["Occupancy"].extend(objects_logs[hour]["Occupancy"]) - occupancy_live = cls.generate_hourly_metric_data(config, objects_logs_merged, entity)[0].tolist() - daily_violations = 0 - entity_directory = entity.base_directory - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - file_path = os.path.join(reports_directory, "live.csv") - if os.path.exists(file_path): - with open(file_path, "r") as live_file: - last_entry = deque(csv.DictReader(live_file), 1)[0] - if datetime.strptime(last_entry["Time"], "%Y-%m-%d %H:%M:%S").date() == datetime.today().date(): - daily_violations = int(last_entry["Violations"]) - if occupancy_live[1] > occupancy_live[2]: - # Max Occupancy detections > Occupancy threshold - daily_violations += 1 - occupancy_live.append(daily_violations) - return occupancy_live - - @classmethod - def get_trend_live_values(cls, live_report_paths: Iterator[str]) -> Iterator[int]: - latest_occupancy_results = {} - for n in range(10): - latest_occupancy_results[n] = None - for live_path in live_report_paths: - with open(live_path, "r") as live_file: - lastest_10_entries = deque(csv.DictReader(live_file), 10) - for index, item in enumerate(lastest_10_entries): - if not latest_occupancy_results[index]: - latest_occupancy_results[index] = 0 - latest_occupancy_results[index] += int(item["MaxOccupancy"]) - return [item for item in latest_occupancy_results.values() if item is not None] - - @classmethod - def get_weekly_report(cls, entities: List[str], number_of_weeks: int = 0, - from_date: date = None, to_date: date = None) -> Dict: - # The occupancy metrics can not be aggregated using "sum" - weekly_report_data = cls.generate_weekly_report_data(entities, number_of_weeks, from_date, to_date) - report = {"Weeks": []} - for header in cls.csv_headers: - report[header] = [] - for week, week_data in weekly_report_data.items(): - report["Weeks"].append(week) - report["AverageOccupancy"].append(round(mean(week_data["AverageOccupancy"]), 2)) - report["MaxOccupancy"].append(max(week_data["MaxOccupancy"])) - report["OccupancyThreshold"].append(max(week_data["OccupancyThreshold"])) - return report diff --git a/libs/metrics/utils.py b/libs/metrics/utils.py index 6659fad2..33ff5d13 100644 --- a/libs/metrics/utils.py +++ b/libs/metrics/utils.py @@ -3,7 +3,6 @@ import pandas as pd from .face_mask_usage import FaceMaskUsageMetric -from .occupancy import OccupancyMetric from .social_distancing import SocialDistancingMetric from .in_out import InOutMetric from .dwell_time import DwellTimeMetric @@ -12,7 +11,6 @@ def compute_hourly_metrics(config): SocialDistancingMetric.compute_hourly_metrics(config) FaceMaskUsageMetric.compute_hourly_metrics(config) - OccupancyMetric.compute_hourly_metrics(config) InOutMetric.compute_hourly_metrics(config) DwellTimeMetric.compute_hourly_metrics(config) @@ -20,7 +18,6 @@ def compute_hourly_metrics(config): def compute_daily_metrics(config): SocialDistancingMetric.compute_daily_metrics(config) 
FaceMaskUsageMetric.compute_daily_metrics(config) - OccupancyMetric.compute_daily_metrics(config) InOutMetric.compute_daily_metrics(config) DwellTimeMetric.compute_daily_metrics(config) @@ -28,7 +25,6 @@ def compute_daily_metrics(config): def compute_live_metrics(config, live_interval): SocialDistancingMetric.compute_live_metrics(config, live_interval) FaceMaskUsageMetric.compute_live_metrics(config, live_interval) - OccupancyMetric.compute_live_metrics(config, live_interval) InOutMetric.compute_live_metrics(config, live_interval) DwellTimeMetric.compute_live_metrics(config, live_interval) diff --git a/libs/notifications/slack_notifications.py b/libs/notifications/slack_notifications.py index 112d2be6..4a7ed653 100644 --- a/libs/notifications/slack_notifications.py +++ b/libs/notifications/slack_notifications.py @@ -68,12 +68,8 @@ def occupancy_alert(self, entity_info, number, threshold): f"We found {number} people out of a capacity of {threshold}." self.post_message_to_channel(msg, self.channel) - def send_global_report(self, report_type, sources, areas, sources_violations_per_hour, areas_violations_per_hour): + def send_global_report(self, report_type, sources, sources_violations_per_hour): msg = f"*{report_type.capitalize()} Report:* \n\n" - msg += "*Areas:*\n" - for index, area in enumerate(areas): - entity_id, entity_name = area['id'], area['name'] - msg += f"*{entity_id}:* {entity_name} - {sum(areas_violations_per_hour[index])} Violations\n" msg += "\n*Cameras:*\n" for index, source in enumerate(sources): entity_id, entity_name = source['id'], source['name'] diff --git a/libs/processor_core.py b/libs/processor_core.py index a67923dc..cb596eed 100644 --- a/libs/processor_core.py +++ b/libs/processor_core.py @@ -6,7 +6,6 @@ from queue import Empty import schedule from libs.engine_threading import run_video_processing -from libs.area_threading import run_area_processing from libs.utils.notifications import run_check_violations logger = logging.getLogger(__name__) @@ -46,7 +45,6 @@ def start(self): def _setup_scheduled_tasks(self): logger.info("Setup scheduled tasks") sources = self.config.get_video_sources() - areas = self.config.get_areas() for src in sources: should_send_email_notifications = src.should_send_email_notifications should_send_slack_notifications = src.should_send_slack_notifications @@ -59,19 +57,6 @@ def _setup_scheduled_tasks(self): ).tag("notification-task") else: logger.info(f"should not send notification for camera {src['id']}") - for area in areas: - should_send_email_notifications = area.should_send_email_notifications - should_send_slack_notifications = area.should_send_slack_notifications - if should_send_email_notifications or should_send_slack_notifications: - interval = area.notify_every_minutes - violation_threshold = area.violation_threshold - if violation_threshold > 0: - schedule.every(interval).minutes.do( - run_check_violations, violation_threshold, self.config, area, interval, - should_send_email_notifications, should_send_slack_notifications - ).tag("notification-task") - else: - logger.info(f"should not send notification for camera {area.id}") def _serve(self): logger.info("Core is listening for commands ... 
") @@ -137,16 +122,8 @@ def start_processing_sources(self): engines.append((send_conn, p)) return engines - def start_processing_areas(self): - recv_conn, send_conn = mp.Pipe(False) - p = mp.Process(target=run_area_processing, args=(self.config, recv_conn, self.config.get_areas())) - p.start() - return (send_conn, p) - def _start_processing(self): self._engines = self.start_processing_sources() - area_engine = self.start_processing_areas() - self._engines.append(area_engine) def _stop_processing(self): for (conn, proc) in self._engines: diff --git a/libs/reports/notifications.py b/libs/reports/notifications.py index 597b26fd..dd170f36 100644 --- a/libs/reports/notifications.py +++ b/libs/reports/notifications.py @@ -26,11 +26,7 @@ def get_daily_report(config, entity_info, report_date): os.path.join(reports_directory, SocialDistancingMetric.reports_folder ,'report_' + report_date + '.csv') ] else: - # entity == 'Area' - camera_ids = entity_info['cameras'] - daily_csv_file_paths = [ - os.path.join(log_directory, camera_id, f"reports/{SocialDistancingMetric.reports_folder}/report_" + report_date + ".csv") - for camera_id in camera_ids] + raise NotImplementedError for file_path in daily_csv_file_paths: violations_per_hour = [] @@ -61,32 +57,28 @@ def send_daily_report_notification(config, entity_info): slack_service.daily_report(entity_info, sum(violations_per_hour)) -def send_global_report(report_type, config, sources, areas, sources_violations_per_hour, areas_violations_per_hour): +def send_global_report(report_type, config, sources, sources_violations_per_hour): emails = config.get_section_dict("App")["GlobalReportingEmails"].split(",") if is_mailing_configured() and emails: ms = MailService(config) - ms.send_global_report(report_type, sources, areas, sources_violations_per_hour, areas_violations_per_hour) + ms.send_global_report(report_type, sources, sources_violations_per_hour) if is_slack_configured(): slack_service = SlackService(config) - slack_service.send_global_report(report_type, sources, areas, sources_violations_per_hour, areas_violations_per_hour) + slack_service.send_global_report(report_type, sources, sources_violations_per_hour) -def send_daily_global_report(config, sources, areas): +def send_daily_global_report(config, sources): yesterday = str(date.today() - timedelta(days=1)) sources_violations_per_hour = [get_daily_report(config, source, yesterday) for source in sources] - areas_violations_per_hour = [get_daily_report(config, area, yesterday) for area in areas] - send_global_report('daily', config, sources, areas, sources_violations_per_hour, areas_violations_per_hour) + send_global_report('daily', config, sources, sources_violations_per_hour) -def send_weekly_global_report(config, sources, areas): +def send_weekly_global_report(config, sources): weekly_sources_violations_per_hour = np.zeros((len(sources), 24)) - weekly_areas_violations_per_hour = np.zeros((len(areas), 24)) start_week = str(date.today() - timedelta(days=8)) yesterday = str(date.today() - timedelta(days=1)) date_range = pd.date_range(start=start_week, end=yesterday) for report_date in date_range: weekly_sources_violations_per_hour += np.array( [get_daily_report(config, source, report_date.strftime('%Y-%m-%d')) for source in sources]) - weekly_areas_violations_per_hour += np.array( - [get_daily_report(config, area, report_date.strftime('%Y-%m-%d')) for area in areas]) - send_global_report('weekly', config, sources, areas, weekly_sources_violations_per_hour, weekly_areas_violations_per_hour) + 
send_global_report('weekly', config, sources, weekly_sources_violations_per_hour) diff --git a/libs/utils/config.py b/libs/utils/config.py index 02845eb5..db81b361 100644 --- a/libs/utils/config.py +++ b/libs/utils/config.py @@ -1,6 +1,2 @@ -def get_area_config_directory(config): - return f"{config.get_section_dict('App')['EntityConfigDirectory']}/areas" - - def get_source_config_directory(config): return f"{config.get_section_dict('App')['EntityConfigDirectory']}/sources" diff --git a/libs/utils/loggers.py b/libs/utils/loggers.py index 2c894592..62a0a9eb 100644 --- a/libs/utils/loggers.py +++ b/libs/utils/loggers.py @@ -1,14 +1,6 @@ import os -def get_area_log_directory(config): - loggers_names = [x for x in config.get_sections() if x.startswith("AreaLogger_")] - for l_name in loggers_names: - logger_section = config.get_section_dict(l_name) - if logger_section["Name"] == "file_system_logger": - return logger_section["LogDirectory"] - - def get_source_log_directory(config): loggers_names = [x for x in config.get_sections() if x.startswith("SourceLogger_")] for l_name in loggers_names: @@ -22,11 +14,6 @@ def get_config_source_directory(config): return os.path.join(base_config_directory, "sources") -def get_config_areas_directory(config): - base_config_directory = config.get_section_dict("App")["EntityConfigDirectory"] - return os.path.join(base_config_directory, "areas") - - def get_source_logging_interval(config): loggers_names = [x for x in config.get_sections() if x.startswith("SourceLogger_")] for l_name in loggers_names: diff --git a/libs/utils/mail_global_report.html b/libs/utils/mail_global_report.html index b6a905ab..dadf2e71 100644 --- a/libs/utils/mail_global_report.html +++ b/libs/utils/mail_global_report.html @@ -49,15 +49,6 @@ style="max-width:100%;min-width:100%;border-collapse:collapse" width="100%" class="m_5283153553260840980mcnTextContentContainer"> - - -
-                                Areas:
- - - {global_areas_report} diff --git a/libs/utils/mailing.py b/libs/utils/mailing.py index bab2f838..fae5af80 100644 --- a/libs/utils/mailing.py +++ b/libs/utils/mailing.py @@ -89,17 +89,10 @@ def send_occupancy_notification(self, entity_info, num_occupancy, threshold): self.send_email_notification(entity_info, subject, html_string) - def send_global_report(self, report_type, sources, areas, sources_violations_per_hour, areas_violations_per_hour): + def send_global_report(self, report_type, sources, sources_violations_per_hour): frontend_url = self.config.get_section_dict("App")["DashboardURL"] with codecs.open('libs/utils/mail_global_report.html', 'r') as f: html_string = f.read() - areas_report = "" - for index, area in enumerate(areas): - areas_report += self.fill_report_table( - "libs/utils/_global_entity_report.html", area, - sum(areas_violations_per_hour[index]), areas_violations_per_hour[index] - ) - html_string = html_string.replace('{global_areas_report}', areas_report) cameras_report = "" for index, source in enumerate(sources): cameras_report += self.fill_report_table( diff --git a/libs/utils/notifications.py b/libs/utils/notifications.py index a26988fd..0a899a2a 100644 --- a/libs/utils/notifications.py +++ b/libs/utils/notifications.py @@ -29,8 +29,7 @@ def check_violations(entity_type, threshold, config, entity_info, interval, shou file_paths = [os.path.join(log_dir, entity_info.id, "objects_log", today + ".csv")] else: # entity_type == 'Area' - camera_ids = entity_info.cameras - file_paths = [os.path.join(log_dir, camera_id, "objects_log", today + ".csv") for camera_id in camera_ids] + raise NotImplementedError for file_path in file_paths: violations += get_violations(file_path, interval) diff --git a/run_periodic_task.py b/run_periodic_task.py index 081d7309..1c848f93 100644 --- a/run_periodic_task.py +++ b/run_periodic_task.py @@ -42,24 +42,19 @@ def main(config): else: raise ValueError(f"Not supported periodic task named: {task_name}") - # Schedule daily/weekly reports for sources and areas + # Schedule daily/weekly reports for sources sources = config.get_video_sources() - areas = config.get_areas() for src in sources: if src['daily_report']: schedule.every().day.at(src['daily_report_time']).do( send_daily_report_notification, config=config, entity_info=src) - for area in areas: - if area.daily_report: - schedule.every().day.at(area.daily_report_time).do( - send_daily_report_notification, config=config, entity_info=area) if config.get_boolean("App", "DailyGlobalReport"): schedule.every().day.at(config.get_section_dict("App")["GlobalReportTime"]).do( - send_daily_global_report, config=config, sources=sources, areas=areas + send_daily_global_report, config=config, sources=sources ) if config.get_boolean("App", "WeeklyGlobalReport"): schedule.every(7).days.at(config.get_section_dict("App")["GlobalReportTime"]).do( - send_weekly_global_report, config=config, sources=sources, areas=areas + send_weekly_global_report, config=config, sources=sources ) while True: From 6bd34fc05ed021abf9d3aab0ddcadbfc01a78de6 Mon Sep 17 00:00:00 2001 From: Pablo Grill Date: Thu, 29 Jul 2021 17:31:28 -0300 Subject: [PATCH 2/7] Remove metrics. 
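Context for this removal: run_periodic_task.py registers every periodic job with the `schedule` library once at startup and then drives them all from a single polling loop, so removing a task mostly amounts to deleting its registration calls. A minimal sketch of that pattern follows; the job name, time, and entity name are illustrative placeholders, not this repo's actual tasks:

    import time
    import schedule

    def send_report(config=None, entity_info=None):
        # Placeholder job body; the real tasks build and send reports.
        print(f"report for {entity_info}")

    # Register once; keyword arguments are stored and forwarded to the
    # callable each time the job fires.
    schedule.every().day.at("06:00").do(
        send_report, config=None, entity_info="cam0")

    while True:
        # `schedule` runs nothing on its own; the caller polls for due jobs.
        schedule.run_pending()
        time.sleep(10)

Because that polling loop stays in place, the remaining registrations (s3_backup and the daily/weekly report notifications) should keep running unchanged after the deletions below.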
--- api/models/config.py | 8 - api/models/periodic_task.py | 10 +- api/processor_api.py | 2 - api/routers/cameras.py | 3 - api/routers/export.py | 50 - api/routers/metrics/__init__.py | 1 - api/routers/metrics/camera_metrics.py | 245 ---- api/routers/metrics/metrics.py | 99 -- api/tests/app/test_camera_metrics.py | 1012 ----------------- api/tests/data/config-x86-openvino_EMPTY.ini | 9 +- .../data/config-x86-openvino_JUST_CAMERAS.ini | 9 +- .../data/config-x86-openvino_METRICS.ini | 5 - config-coral.ini | 9 +- config-jetson-nano.ini | 9 +- config-jetson-tx2.ini | 9 +- config-x86-gpu-tensorrt.ini | 9 +- config-x86-gpu.ini | 9 +- config-x86-openvino.ini | 9 +- config-x86.ini | 9 +- constants.py | 6 - libs/backups/s3_backup.py | 26 +- libs/metrics/__init__.py | 4 - libs/metrics/base.py | 477 -------- libs/metrics/dwell_time.py | 217 ---- libs/metrics/face_mask_usage.py | 136 --- libs/metrics/in_out.py | 302 ----- libs/metrics/social_distancing.py | 205 ---- libs/metrics/utils.py | 64 -- libs/reports/notifications.py | 3 +- run_periodic_task.py | 13 +- 30 files changed, 15 insertions(+), 2954 deletions(-) delete mode 100644 api/routers/metrics/__init__.py delete mode 100644 api/routers/metrics/camera_metrics.py delete mode 100644 api/routers/metrics/metrics.py delete mode 100644 api/tests/app/test_camera_metrics.py delete mode 100644 libs/metrics/__init__.py delete mode 100644 libs/metrics/base.py delete mode 100644 libs/metrics/dwell_time.py delete mode 100644 libs/metrics/face_mask_usage.py delete mode 100644 libs/metrics/in_out.py delete mode 100644 libs/metrics/social_distancing.py delete mode 100644 libs/metrics/utils.py diff --git a/api/models/config.py b/api/models/config.py index 833c2b26..6fd2d631 100644 --- a/api/models/config.py +++ b/api/models/config.py @@ -46,18 +46,10 @@ class Config: } -class ConfigMetrics(BaseModel): - social_distancing: bool - facemask: bool - occupancy: bool - in_out: bool - - class ConfigInfo(BaseModel): version: str device: str has_been_configured: bool - metrics: ConfigMetrics class Config: schema_extra = { diff --git a/api/models/periodic_task.py b/api/models/periodic_task.py index eab663cd..37d4e0f6 100644 --- a/api/models/periodic_task.py +++ b/api/models/periodic_task.py @@ -11,15 +11,11 @@ class PeriodicTaskDTO(OptionalSectionConfig): @validator("name") def validate_name(cls, value): - if value not in ["metrics", "s3_backup"]: + if value not in ["s3_backup"]: raise ValueError(f"Not supported periodic task named: {value}") return value -class MetricsTaksDTO(PeriodicTaskDTO): - liveInterval: int = Field(example=10) - - class S3Backup(PeriodicTaskDTO): backupInterval: Optional[int] = Field(example=30) backupS3Bucket: str = Field(example="your-s3-bucket") @@ -31,8 +27,6 @@ class PeriodicTaskListDTO(SnakeModel): def validate_periodic_task(task: PeriodicTaskDTO): task_model = None - if task.name == "metrics": - task_model = MetricsTaksDTO - elif task.name == "s3_backup": + if task.name == "s3_backup": task_model = S3Backup task_model(**task.dict()) diff --git a/api/processor_api.py b/api/processor_api.py index f3845cab..9e3e7b4e 100644 --- a/api/processor_api.py +++ b/api/processor_api.py @@ -22,7 +22,6 @@ from .routers.config import config_router from .routers.detector import detector_router from .routers.export import export_router -from .routers.metrics import camera_metrics_router from .routers.periodic_tasks import periodic_tasks_router from .routers.slack import slack_router from .routers.source_loggers import source_loggers_router @@ -80,7 +79,6 @@ 
def create_fastapi_app(self): tags=["Source Post Processors"], dependencies=dependencies) app.include_router(source_loggers_router, prefix="/source_loggers", tags=["Source Loggers"], dependencies=dependencies) app.include_router(periodic_tasks_router, prefix="/periodic_tasks", tags=["Periodic Tasks"], dependencies=dependencies) - app.include_router(camera_metrics_router, prefix="/metrics/cameras", tags=["Metrics"], dependencies=dependencies) app.include_router(export_router, prefix="/export", tags=["Export"], dependencies=dependencies) app.include_router(slack_router, prefix="/slack", tags=["Slack"], dependencies=dependencies) app.include_router(auth_router, prefix="/auth", tags=["Auth"]) diff --git a/api/routers/cameras.py b/api/routers/cameras.py index 98166ca8..e8138f99 100644 --- a/api/routers/cameras.py +++ b/api/routers/cameras.py @@ -23,7 +23,6 @@ from api.models.camera import (CameraDTO, CamerasListDTO, CreateCameraDTO, ImageModel, VideoLiveFeedModel, ContourRoI, InOutBoundaries) from libs.source_post_processors.objects_filtering import ObjectsFilteringPostProcessor -from libs.metrics.in_out import InOutMetric from libs.utils.utils import validate_file_exists_and_is_not_empty logger = logging.getLogger(__name__) @@ -45,8 +44,6 @@ def map_camera(camera_name, config, options=[]): camera_dict["has_been_calibrated"] = validate_file_exists_and_is_not_empty(calibration_file_path) roi_file_path = ObjectsFilteringPostProcessor.get_roi_file_path(camera_id, settings.config) camera_dict["has_defined_roi"] = validate_file_exists_and_is_not_empty(roi_file_path) - in_out_file_path = InOutMetric.get_in_out_file_path(camera_id, settings.config) - camera_dict["has_in_out_border"] = validate_file_exists_and_is_not_empty(in_out_file_path) return camera_dict diff --git a/api/routers/export.py b/api/routers/export.py index f0deed65..d03b9b09 100644 --- a/api/routers/export.py +++ b/api/routers/export.py @@ -12,7 +12,6 @@ from api.models.export import ExportDTO, ExportDataType from api.utils import extract_config, clean_up_file -from libs.metrics import FaceMaskUsageMetric, SocialDistancingMetric, InOutMetric, DwellTimeMetric logger = logging.getLogger(__name__) @@ -21,11 +20,6 @@ # Define the exports data types as constants ALL_DATA = ExportDataType.all_data RAW_DATA = ExportDataType.raw_data -SOCIAL_DISTANCING = ExportDataType.social_distancing -FACEMASK_USAGE = ExportDataType.facemask_usage -IN_OUT = ExportDataType.in_out -OCCUPANCY = ExportDataType.occupancy -DWELL_TIME = ExportDataType.dwell_time def export_folder_into_zip(source_path, destination_path, zip_file, from_date, to_date): @@ -84,50 +78,6 @@ def export_camera_data_into_file(export_info: ExportDTO, camera_id: str, camera_ export_info.from_date, export_info.to_date ) - if ALL_DATA in export_info.data_types or SOCIAL_DISTANCING in export_info.data_types: - social_ditancing_reports_folder = f"reports/{SocialDistancingMetric.reports_folder}" - social_ditancing_reports_path = os.path.join( - os.getenv("SourceLogDirectory"), camera_id, social_ditancing_reports_folder) - export_folder_into_zip( - social_ditancing_reports_path, - os.path.join("cameras", f"{camera_id}-{camera_name}", social_ditancing_reports_folder), - zip_file, - export_info.from_date, - export_info.to_date - ) - if ALL_DATA in export_info.data_types or DWELL_TIME in export_info.data_types: - dwell_time_reports_folder = f"reports/{DwellTimeMetric.reports_folder}" - dwell_time_reports_path = os.path.join( - os.getenv("SourceLogDirectory"), camera_id, dwell_time_reports_folder) - 
export_folder_into_zip( - dwell_time_reports_path, - os.path.join("cameras", f"{camera_id}-{camera_name}", dwell_time_reports_folder), - zip_file, - export_info.from_date, - export_info.to_date - ) - if ALL_DATA in export_info.data_types or FACEMASK_USAGE in export_info.data_types: - face_mask_reports_folder = f"reports/{FaceMaskUsageMetric.reports_folder}" - face_mask_reports_path = os.path.join( - os.getenv("SourceLogDirectory"), camera_id, face_mask_reports_folder) - export_folder_into_zip( - face_mask_reports_path, - os.path.join("cameras", f"{camera_id}-{camera_name}", face_mask_reports_folder), - zip_file, - export_info.from_date, - export_info.to_date - ) - if ALL_DATA in export_info.data_types or IN_OUT in export_info.data_types: - in_out_reports_folder = f"reports/{InOutMetric.reports_folder}" - in_out_reports_path = os.path.join( - os.getenv("SourceLogDirectory"), camera_id, in_out_reports_folder) - export_folder_into_zip( - in_out_reports_path, - os.path.join("cameras", f"{camera_id}-{camera_name}", in_out_reports_folder), - zip_file, - export_info.from_date, - export_info.to_date - ) @export_router.put("") diff --git a/api/routers/metrics/__init__.py b/api/routers/metrics/__init__.py deleted file mode 100644 index 332e0daa..00000000 --- a/api/routers/metrics/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .camera_metrics import metrics_router as camera_metrics_router # noqa diff --git a/api/routers/metrics/camera_metrics.py b/api/routers/metrics/camera_metrics.py deleted file mode 100644 index 2a8ebab4..00000000 --- a/api/routers/metrics/camera_metrics.py +++ /dev/null @@ -1,245 +0,0 @@ -from datetime import date, timedelta -from fastapi import APIRouter, Query, HTTPException, status -from typing import Optional - -from api.models.metrics import ( - FaceMaskDaily, FaceMaskLive, FaceMaskHourly, FaceMaskWeekly, HeatmapReport, - SocialDistancingDaily, SocialDistancingHourly, SocialDistancingLive, - SocialDistancingWeekly, InOutDaily, InOutLive, InOutHourly, InOutWeekly, - DwellTimeDaily, DwellTimeHourly, DwellTimeLive, DwellTimeWeekly) -from api.utils import bad_request_serializer -from constants import CAMERAS, FACEMASK_USAGE, SOCIAL_DISTANCING, IN_OUT, DWELL_TIME -from libs.metrics.utils import generate_heatmap - -from .metrics import (validate_camera_existence, get_live_metric, get_hourly_metric, get_daily_metric, - get_weekly_metric, validate_dates) - -metrics_router = APIRouter() - - -@metrics_router.get("/{camera_id}/heatmap", response_model=HeatmapReport) -def get_heatmap(camera_id: str, - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today()), - report_type: Optional[str] = "violations"): - """ - Returns a heatmap image displaying the violations/detections detected by the camera - """ - validate_camera_existence(camera_id) - validate_dates(from_date, to_date) - if report_type in ["violations", "detections"]: - return generate_heatmap(camera_id, from_date, to_date, report_type) - else: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=bad_request_serializer("Invalid report_type", error_type="invalid config") - ) - - -# Social Distancing Metrics -@metrics_router.get("/social-distancing/live", response_model=SocialDistancingLive) -def get_camera_distancing_live(cameras: str = ""): - """ - Returns a report with live information about the social distancing infractions - detected in the cameras . 
- """ - return get_live_metric(CAMERAS, cameras, SOCIAL_DISTANCING) - - -@metrics_router.get("/social-distancing/hourly", response_model=SocialDistancingHourly) -def get_camera_distancing_hourly_report(cameras: str = "", date: date = Query(date.today())): - """ - Returns a hourly report (for the date specified) with information about the social distancing infractions - detected in the cameras . - """ - return get_hourly_metric(CAMERAS, cameras, SOCIAL_DISTANCING, date) - - -@metrics_router.get("/social-distancing/daily", response_model=SocialDistancingDaily) -def get_camera_distancing_daily_report(cameras: str = "", - from_date: date = Query((date.today() - timedelta(days=3))), - to_date: date = Query(date.today())): - """ - Returns a daily report (for the date range specified) with information about the social distancing infractions - detected in the cameras . - """ - return get_daily_metric(CAMERAS, cameras, SOCIAL_DISTANCING, from_date, to_date) - - -@metrics_router.get("/social-distancing/weekly", response_model=SocialDistancingWeekly) -def get_camera_distancing_weekly_report( - cameras: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today())): - """ - Returns a weekly report (for the date range specified) with information about the social distancing - infractions detected in the cameras . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. - - Report spans from `weeks*7 + 1` days ago to yesterday. - - Taking yesterday as the end of week. - - **Else:** - - Report spans from `from_Date` to `to_date`. - - Taking Sunday as the end of week - """ - return get_weekly_metric(CAMERAS, cameras, SOCIAL_DISTANCING, from_date, to_date, weeks) - - -# Dwell Time Metrics -@metrics_router.get("/dwell-time/live", response_model=DwellTimeLive) -def get_camera_dwell_time_live(cameras: str = ""): - """ - Returns a report with live information about the dwell time of people - detected in the cameras . - """ - return get_live_metric(CAMERAS, cameras, DWELL_TIME) - - -@metrics_router.get("/dwell-time/hourly", response_model=DwellTimeHourly) -def get_camera_dwell_time_hourly_report(cameras: str = "", date: date = Query(date.today())): - """ - Returns a hourly report (for the date specified) with information about the dwell being of people - detected in the cameras . - """ - return get_hourly_metric(CAMERAS, cameras, DWELL_TIME, date) - - -@metrics_router.get("/dwell-time/daily", response_model=DwellTimeDaily) -def get_camera_dwell_time_daily_report(cameras: str = "", - from_date: date = Query((date.today() - timedelta(days=3))), - to_date: date = Query(date.today())): - """ - Returns a daily report (for the date range specified) with information about the dwell time of the peoples - detected in the cameras . - """ - return get_daily_metric(CAMERAS, cameras, DWELL_TIME, from_date, to_date) - - -@metrics_router.get("/dwell-time/weekly", response_model=DwellTimeWeekly) -def get_camera_dwell_time_weekly_report( - cameras: str = "", - weeks: int = Query(0), - from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))), - to_date: date = Query(date.today())): - """ - Returns a weekly report (for the date range specified) with information about the swell time - of people detected in the cameras . - - **If `weeks` is provided and is a positive number:** - - `from_date` and `to_date` are ignored. 
-    - Report spans from `weeks*7 + 1` days ago to yesterday.
-    - Taking yesterday as the end of week.
-
-    **Else:**
-    - Report spans from `from_date` to `to_date`.
-    - Taking Sunday as the end of week
-    """
-    return get_weekly_metric(CAMERAS, cameras, DWELL_TIME, from_date, to_date, weeks)
-
-
-# Face Mask Metrics
-@metrics_router.get("/face-mask-detections/live", response_model=FaceMaskLive)
-def get_camera_face_mask_detections_live(cameras: str = ""):
-    """
-    Returns a report with live information about the facemasks detected in the
-    cameras.
-    """
-    return get_live_metric(CAMERAS, cameras, FACEMASK_USAGE)
-
-
-@metrics_router.get("/face-mask-detections/hourly", response_model=FaceMaskHourly)
-def get_camera_face_mask_detections_hourly_report(cameras: str = "", date: date = Query(date.today())):
-    """
-    Returns an hourly report (for the date specified) with information about the facemasks detected in
-    the cameras.
-    """
-    return get_hourly_metric(CAMERAS, cameras, FACEMASK_USAGE, date)
-
-
-@metrics_router.get("/face-mask-detections/daily", response_model=FaceMaskDaily)
-def get_camera_face_mask_detections_daily_report(cameras: str = "",
-                                                 from_date: date = Query((date.today() - timedelta(days=3))),
-                                                 to_date: date = Query(date.today())):
-    """
-    Returns a daily report (for the date range specified) with information about the facemasks detected in
-    the cameras.
-    """
-    return get_daily_metric(CAMERAS, cameras, FACEMASK_USAGE, from_date, to_date)
-
-
-@metrics_router.get("/face-mask-detections/weekly", response_model=FaceMaskWeekly)
-def get_camera_face_mask_detections_weekly_report(
-        cameras: str = "",
-        weeks: int = Query(0),
-        from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4))),
-        to_date: date = Query(date.today())):
-    """
-    Returns a weekly report (for the date range specified) with information about the facemasks detected in
-    the cameras.
-
-    **If `weeks` is provided and is a positive number:**
-    - `from_date` and `to_date` are ignored.
-    - Report spans from `weeks*7 + 1` days ago to yesterday.
-    - Taking yesterday as the end of week.
-
-    **Else:**
-    - Report spans from `from_date` to `to_date`.
-    - Taking Sunday as the end of week
-    """
-    return get_weekly_metric(CAMERAS, cameras, FACEMASK_USAGE, from_date, to_date, weeks)
-
-
-# In Out Metrics
-@metrics_router.get("/in-out/live", response_model=InOutLive)
-def get_camera_in_out_live(cameras: str = ""):
-    """
-    Returns a report with live information about the in-out flow detected in the
-    cameras.
-    """
-    return get_live_metric(CAMERAS, cameras, IN_OUT)
-
-
-@metrics_router.get("/in-out/hourly", response_model=InOutHourly)
-def get_camera_in_out_hourly_report(cameras: str = "", date: date = Query(date.today().isoformat())):
-    """
-    Returns an hourly report (for the date specified) with information about the in-out flow detected in
-    the cameras.
-    """
-    return get_hourly_metric(CAMERAS, cameras, IN_OUT, date)
-
-
-@metrics_router.get("/in-out/daily", response_model=InOutDaily)
-def get_camera_in_out_daily_report(
-        cameras: str = "",
-        from_date: date = Query((date.today() - timedelta(days=3)).isoformat()),
-        to_date: date = Query(date.today().isoformat())):
-    """
-    Returns a daily report (for the date range specified) with information about the in-out flow detected in
-    the cameras.
-    """
-    return get_daily_metric(CAMERAS, cameras, IN_OUT, from_date, to_date)
-
-
-@metrics_router.get("/in-out/weekly", response_model=InOutWeekly)
-def get_camera_in_out_weekly_report(
-        cameras: str = "",
-        weeks: int = Query(0),
-        from_date: date = Query((date.today() - timedelta(days=date.today().weekday(), weeks=4)).isoformat()),
-        to_date: date = Query(date.today().isoformat())):
-    """
-    Returns a weekly report (for the date range specified) with information about the in-out flow detected in
-    the cameras.
-
-    **If `weeks` is provided and is a positive number:**
-    - `from_date` and `to_date` are ignored.
-    - Report spans from `weeks*7 + 1` days ago to yesterday.
-    - Taking yesterday as the end of week.
-
-    **Else:**
-    - Report spans from `from_date` to `to_date`.
-    - Taking Sunday as the end of week
-    """
-    return get_weekly_metric(CAMERAS, cameras, IN_OUT, from_date, to_date, weeks)
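The weekly grouping that get_weekly_metric relies on when `weeks` is positive can be summarized with a small sketch (plain Python; the helper name is hypothetical, and the behaviour shown matches the frozen-time expectations in the tests below):

    from datetime import date, timedelta

    def weekly_ranges(weeks: int, today: date):
        # Windows of 7 days each, the last one ending yesterday.
        end = today - timedelta(days=1)
        start = end - timedelta(days=weeks * 7 - 1)
        return [(start + timedelta(days=7 * i), start + timedelta(days=7 * i + 6))
                for i in range(weeks)]

    # weekly_ranges(4, date(2020, 9, 30)) yields the four windows
    # 2020-09-02..2020-09-08, 09-09..09-15, 09-16..09-22, 09-23..09-29,
    # matching the 'weeks' values asserted under @freeze_time("2020-09-30").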
diff --git a/api/routers/metrics/metrics.py b/api/routers/metrics/metrics.py
deleted file mode 100644
index bb42bdef..00000000
--- a/api/routers/metrics/metrics.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import os
-
-from datetime import date
-from fastapi import HTTPException, status
-from typing import Iterator
-
-from api.utils import bad_request_serializer, extract_config
-from constants import CAMERAS, FACEMASK_USAGE, SOCIAL_DISTANCING, IN_OUT, DWELL_TIME
-from libs.metrics import FaceMaskUsageMetric, SocialDistancingMetric, InOutMetric, DwellTimeMetric
-
-
-CAMERAS_METRICS = [SOCIAL_DISTANCING, FACEMASK_USAGE, IN_OUT]
-
-
-def get_cameras(cameras: str) -> Iterator[str]:
-    if cameras:
-        return cameras.split(",")
-    config = extract_config(config_type=CAMERAS)
-    return [x["Id"] for x in config.values()]
-
-
-def get_all_cameras() -> Iterator[str]:
-    config = extract_config(config_type=CAMERAS)
-    return [x["Id"] for x in config.values()]
-
-
-def validate_camera_existence(camera_id: str):
-    dir_path = os.path.join(os.getenv("SourceLogDirectory"), camera_id, "objects_log")
-    if not os.path.exists(dir_path):
-        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Camera with id '{camera_id}' does not exist")
-
-
-def validate_dates(from_date: date, to_date: date):
-    if from_date > to_date:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail=bad_request_serializer(
-                "Invalid range of dates",
-                error_type="from_date doesn't come before to_date",
-                loc=["query", "from_date"]
-            )
-        )
-
-
-def get_entities(entity: str, entities_ids: str, metric: str):
-    entities = []
-    if entity == CAMERAS:
-        entities = get_cameras(entities_ids)
-        for e in entities:
-            validate_camera_existence(e)
-    else:
-        # entities == AREAS
-        raise NotImplementedError
-    return entities
-
-
-def get_metric_class(metric: str):
-    if metric == SOCIAL_DISTANCING:
-        return SocialDistancingMetric
-    elif metric == DWELL_TIME:
-        return DwellTimeMetric
-    elif metric == FACEMASK_USAGE:
-        return FaceMaskUsageMetric
-    elif metric == IN_OUT:
-        return InOutMetric
-    else:
-        raise ValueError(f"Metric {metric} not supported.")
-
-
-def get_live_metric(entity: str, entities_ids: str, metric: str):
-    entities = get_entities(entity, entities_ids, metric)
-    metric_class = get_metric_class(metric)
-    return metric_class.get_live_report(entities)
-
-
-def get_hourly_metric(entity: str, entities_ids: str, metric: str, date: date):
-    entities = get_entities(entity, entities_ids, metric)
-    metric_class = get_metric_class(metric)
-    return metric_class.get_hourly_report(entities, date)
-
-
-def get_daily_metric(entity: str, entities_ids: str, metric: str, from_date: date,
-                     to_date: date):
-    validate_dates(from_date, to_date)
-    entities = get_entities(entity, entities_ids, metric)
-    metric_class = get_metric_class(metric)
-    return metric_class.get_daily_report(entities, from_date, to_date)
-
-
-def get_weekly_metric(entity: str, entities_ids: str, metric: str, from_date: date,
-                      to_date: date, weeks: int):
-    entities = get_entities(entity, entities_ids, metric)
-    metric_class = get_metric_class(metric)
-    if weeks > 0:
-        # Report over the last `weeks` whole weeks, grouped by week and ending yesterday
-        return metric_class.get_weekly_report(entities, number_of_weeks=weeks)
-    else:
-        # Report over the defined date range, weeks ending on Sunday
-        validate_dates(from_date, to_date)
-        return metric_class.get_weekly_report(entities, from_date=from_date, to_date=to_date)
diff --git a/api/tests/app/test_camera_metrics.py b/api/tests/app/test_camera_metrics.py
deleted file mode 100644
index 24f1707d..00000000
--- a/api/tests/app/test_camera_metrics.py
+++ /dev/null
@@ -1,1012 +0,0 @@
-import os
-import pytest
-from freezegun import freeze_time
-import numpy as np
-# The line below is absolutely necessary. Fixtures are passed as arguments to test functions. That is why the IDE
-# may not recognize them.
-from api.tests.utils.fixtures_tests import config_rollback_cameras, heatmap_simulation, config_rollback
-
-HEATMAP_PATH_PREFIX = "/repo/api/tests/data/mocked_data/data/processor/static/data/sources/"
-
-
-# pytest -v api/tests/app/test_camera_metrics.py::TestsGetHeatmap
-class TestsGetHeatmap:
-    """ Get Heatmap, GET /metrics/cameras/{camera_id}/heatmap """
-    """
-    Returns a heatmap image displaying the violations/detections detected by the camera.
-    """
-
-    def test_get_one_heatmap_properly(self, config_rollback_cameras, heatmap_simulation):
-        # Make the request
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-19&to_date=2020-09-19")
-
-        # Get the heatmap
-        heatmap_path = os.path.join(HEATMAP_PATH_PREFIX, camera_id, "heatmaps", "violations_heatmap_2020-09-19.npy")
-        heatmap = np.load(heatmap_path).tolist()
-
-        # Compare results
-        assert response.status_code == 200
-        assert response.json()["heatmap"] == heatmap
-
-    def test_try_get_two_heatmaps(self, config_rollback_cameras, heatmap_simulation):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-19&to_date=2020-09-20")
-
-        heatmap_path = os.path.join(HEATMAP_PATH_PREFIX, camera_id, "heatmaps", "violations_heatmap_2020-09-19.npy")
-        heatmap = np.load(heatmap_path).tolist()
-
-        assert response.status_code == 200
-        assert response.json()["heatmap"] == heatmap
-        assert response.json()["not_found_dates"] == ["2020-09-20"]
-
-    def test_get_two_valid_heatmaps(self, config_rollback_cameras, heatmap_simulation):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-19&to_date=2020-09-22")
-
-        heatmap_path_1 = os.path.join(HEATMAP_PATH_PREFIX, camera_id, "heatmaps", "violations_heatmap_2020-09-19.npy")
-        heatmap_path_2 = os.path.join(HEATMAP_PATH_PREFIX, camera_id, "heatmaps", "violations_heatmap_2020-09-22.npy")
-        heatmap_1 = np.load(heatmap_path_1)
-        heatmap_2 = np.load(heatmap_path_2)
-        final_heatmap = np.add(heatmap_1, heatmap_2).tolist()
-
-        assert response.status_code == 200
-        assert response.json()["not_found_dates"] == ['2020-09-20', '2020-09-21']
-        assert response.json()['heatmap'] == final_heatmap
-
-    def test_get_one_heatmap_properly_detections(self, config_rollback_cameras, heatmap_simulation):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(
-            f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-19&to_date=2020-09-19&report_type=detections")
-
-        heatmap_path = os.path.join(HEATMAP_PATH_PREFIX, camera_id, "heatmaps", "detections_heatmap_2020-09-19.npy")
-        heatmap = np.load(heatmap_path).tolist()
-
-        assert response.status_code == 200
-        assert response.json()["heatmap"] == heatmap
-
-    def test_try_get_one_heatmap_bad_camera_id(self, config_rollback_cameras, heatmap_simulation):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = "wrong_id"
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-19&to_date=2020-09-19")
-
-        assert response.status_code == 404
-        assert response.json() == {'detail': "Camera with id 'wrong_id' does not exist"}
-
-    def test_try_get_one_heatmap_bad_report_type(self, config_rollback_cameras, heatmap_simulation):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(
-            f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-19&to_date=2020-09-19&report_type"
-            f"=non_existent_report_type")
-
-        assert response.status_code == 400
-        assert response.json() == {'detail': [{'loc': [], 'msg': 'Invalid report_type', 'type': 'invalid config'}]}
-
-    def test_try_get_one_heatmap_bad_dates(self, config_rollback_cameras, heatmap_simulation):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date=today&to_date=tomorrow")
-
-        assert response.status_code == 400
-        assert response.json() == {'detail': [{'loc': ['query', 'from_date'], 'msg': 'invalid date format',
-                                               'type': 'value_error.date'},
-                                              {'loc': ['query', 'to_date'], 'msg': 'invalid date format',
-                                               'type': 'value_error.date'}], 'body': None}
-
-    def test_try_get_one_heatmap_wrong_dates(self, config_rollback_cameras, heatmap_simulation):
-        """from_date is after to_date"""
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date=2020-09-20&to_date=2020-09-19")
-
-        assert response.status_code == 400
-
-    def test_try_get_one_heatmap_only_from_date(self, config_rollback_cameras, heatmap_simulation):
-        """ Note that since we do not send to_date, the default value applies, and to_date will be
-        date.today().
-        WARNING: We could not mock the date.today() when the function is called within default query parameters.
-        So, we must be careful because the data range will be: "2021-01-10" - "today".
-        """
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        from_date = "2021-01-10"
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?from_date={from_date}")
-
-        assert response.status_code == 200
-
-    def test_try_get_one_heatmap_only_to_date(self, config_rollback_cameras, heatmap_simulation):
-        """ Note that since we do not send from_date, the default value applies, and from_date will be
-        date.today().
-        WARNING: We could not mock the date.today() when the function is called within default query parameters.
-        So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(),
-        weeks=4)" - "2020-09-20" and this date range is probably wrong because from_date will be later than to_date.
-        """
-
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        to_date = "2020-09-20"
-
-        response = client.get(f"/metrics/cameras/{camera_id}/heatmap?to_date={to_date}")
-
-        assert response.status_code == 400
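The heatmap tests above rely on one saved NumPy array per day; the endpoint sums the arrays for the requested range and reports missing days separately. A minimal sketch of that aggregation, under the file layout used by these tests:

    import numpy as np

    def aggregate_heatmaps(paths):
        # Sum the per-day arrays; days without a file end up in "not_found_dates".
        total = None
        for path in paths:
            day = np.load(path)
            total = day if total is None else np.add(total, day)
        return total.tolist()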
-
-
-# pytest -v api/tests/app/test_camera_metrics.py::TestsGetCameraDistancingLive
-class TestsGetCameraDistancingLive:
-    """ Get Camera Distancing Live, GET /metrics/cameras/social-distancing/live """
-    """
-    Returns a report with live information about the social distancing infractions detected in the cameras.
-    """
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {
-                'time': '2021-02-19 13:37:58',
-                'trend': 0.05,
-                'detected_objects': 6,
-                'no_infringement': 5,
-                'low_infringement': 0,
-                'high_infringement': 1,
-                'critical_infringement': 0
-            }),
-            ("face-mask-detections", {
-                'time': '2021-02-19 13:37:58',
-                'trend': 0.0,
-                'no_face': 10,
-                'face_with_mask': 0,
-                'face_without_mask': 0
-            })
-        ]
-    )
-    def test_get_a_report_properly(self, config_rollback_cameras, metric, expected):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(f"/metrics/cameras/{metric}/live?cameras={camera_id}")
-
-        assert response.json() == expected
-        assert response.status_code == 200
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {
-                'time': '2021-02-19 13:37:58', 'trend': 0.72, 'detected_objects': 20, 'no_infringement': 9,
-                'low_infringement': 7, 'high_infringement': 2, 'critical_infringement': 3
-            }),
-            ("face-mask-detections", {
-                'time': '2021-02-19 13:37:58', 'trend': 0.52, 'no_face': 24, 'face_with_mask': 8, 'face_without_mask': 1
-            })
-        ]
-    )
-    def test_get_a_report_two_valid_cameras(self, config_rollback_cameras, metric, expected):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id_1 = camera["id"]
-        camera_id_2 = camera_2["id"]
-
-        response = client.get(f"/metrics/cameras/{metric}/live?cameras={camera_id_1},{camera_id_2}")
-
-        assert response.json() == expected
-        assert response.status_code == 200
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {'detail': "Camera with id 'BAD_ID' does not exist"}),
-            ("face-mask-detections", {'detail': "Camera with id 'BAD_ID' does not exist"})
-        ]
-    )
-    def test_try_get_a_report_bad_id_camera(self, config_rollback_cameras, metric, expected):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-
-        response = client.get(f"/metrics/cameras/{metric}/live?cameras=BAD_ID")
-
-        assert response.json() == expected
-        assert response.status_code == 404
-
-
-# pytest -v api/tests/app/test_camera_metrics.py::TestsGetCameraDistancingHourlyReport
-class TestsGetCameraDistancingHourlyReport:
-    """ Get Camera Distancing Hourly Report , GET /metrics/cameras/social-distancing/hourly """
-    """
-    Returns an hourly report (for the date specified) with information about
-    the social distancing infractions detected in the cameras.
- """ - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [54, 30, 19, 37, 27, 39, 44, 25, 51, 31, 47, 39, 16, 26, 67, 29, 36, 17, 31, 32, 19, - 38, - 34, 50], - 'no_infringement': [13, 5, 2, 18, 5, 11, 10, 6, 14, 6, 17, 18, 4, 8, 17, 11, 3, 6, 7, 4, 6, 10, 11, 18], - 'low_infringement': [10, 14, 4, 19, 11, 15, 7, 7, 11, 2, 1, 3, 10, 10, 19, 7, 15, 5, 5, 16, 4, 12, 13, - 17], - 'high_infringement': [16, 2, 3, 0, 8, 1, 16, 11, 12, 6, 15, 0, 0, 1, 14, 7, 10, 2, 1, 9, 8, 13, 0, 15], - 'critical_infringement': [15, 9, 10, 0, 3, 12, 11, 1, 14, 17, 14, 18, 2, 7, 17, 4, 8, 4, 18, 3, 1, 3, - 10, - 0], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }), - ("face-mask-detections", { - 'no_face': [3, 3, 9, 2, 8, 2, 9, 8, 8, 0, 1, 2, 4, 6, 6, 2, 5, 2, 0, 0, 8, 3, 1, 2], - 'face_with_mask': [5, 4, 6, 9, 2, 3, 9, 7, 7, 3, 8, 3, 6, 7, 4, 2, 0, 1, 4, 1, 9, 5, 1, 4], - 'face_without_mask': [2, 6, 0, 8, 7, 7, 9, 1, 9, 8, 6, 4, 5, 7, 1, 0, 7, 5, 3, 3, 3, 8, 6, 5], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }) - ] - ) - def test_get_an_hourly_report_properly(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - date = "2021-02-25" - - response = client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id}&date={date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'no_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'high_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'face_with_mask': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'face_without_mask': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }) - ] - ) - def test_get_an_hourly_report_properly_II_less_than_23_hours(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - date = "2021-02-19" - - response = client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id}&date={date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [108, 60, 38, 74, 54, 78, 88, 50, 102, 62, 94, 78, 32, 52, 134, 58, 72, 34, 62, 64, - 38, - 76, 68, 100], - 'no_infringement': [26, 10, 4, 36, 10, 22, 20, 12, 28, 12, 34, 36, 8, 16, 34, 22, 6, 12, 14, 8, 12, 20, - 22, - 36], - 'low_infringement': [20, 28, 8, 38, 22, 30, 14, 14, 22, 4, 2, 6, 20, 20, 38, 14, 30, 10, 10, 32, 8, 24, - 26, - 34], - 'high_infringement': [32, 4, 6, 0, 16, 2, 32, 22, 
24, 12, 30, 0, 0, 2, 28, 14, 20, 4, 2, 18, 16, 26, 0, - 30], - 'critical_infringement': [30, 18, 20, 0, 6, 24, 22, 2, 28, 34, 28, 36, 4, 14, 34, 8, 16, 8, 36, 6, 2, 6, - 20, - 0], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]} - ), - ("face-mask-detections", { - 'no_face': [6, 6, 18, 4, 16, 4, 18, 16, 16, 0, 2, 4, 8, 12, 12, 4, 10, 4, 0, 0, 16, 6, 2, 4], - 'face_with_mask': [10, 8, 12, 18, 4, 6, 18, 14, 14, 6, 16, 6, 12, 14, 8, 4, 0, 2, 8, 2, 18, 10, 2, 8], - 'face_without_mask': [4, 12, 0, 16, 14, 14, 18, 2, 18, 16, 12, 8, 10, 14, 2, 0, 14, 10, 6, 6, 6, 16, 12, - 10], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }) - ] - ) - def test_get_hourly_report_two_dates(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - camera_id_2 = camera["id"] - date = "2021-02-25" - - response = client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id},{camera_id_2}&date={date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", {'detail': "Camera with id 'BAD_ID' does not exist"}), - ("face-mask-detections", {'detail': "Camera with id 'BAD_ID' does not exist"}) - ] - ) - def test_try_get_hourly_report_non_existent_id(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = 'BAD_ID' - date = "2021-02-25" - - response = client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id}&date={date}") - - assert response.status_code == 404 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["social-distancing", "face-mask-detections"] - ) - def test_try_get_hourly_report_bad_date_format(self, config_rollback_cameras, metric): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera['id'] - date = "WRONG_DATE" - - response = client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id}&date={date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'no_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'high_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'face_with_mask': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'face_without_mask': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'hours': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - }) - ] - ) - def test_try_get_hourly_report_non_existent_date(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera['id'] - date = "2003-05-24" - - response = 
client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id}&date={date}") - - assert response.status_code == 200 - # Since no files with the specified date were found, no objects were added to the report. - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", {'detail': "Camera with id 'BAD_ID' does not exist"}), - ("face-mask-detections", {'detail': "Camera with id 'BAD_ID' does not exist"}) - ] - ) - def test_try_get_hourly_report_two_dates_one_of_them_bad_id(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - camera_id_2 = 'BAD_ID' - date = "2021-02-25" - - response = client.get(f"/metrics/cameras/{metric}/hourly?cameras={camera_id},{camera_id_2}&date={date}") - - assert response.status_code == 404 - assert response.json() == expected - - -# pytest -v api/tests/app/test_camera_metrics.py::TestsGetCameraDistancingDailyReport -class TestsGetCameraDistancingDailyReport: - """ Get Camera Distancing Daily Report , GET /metrics/cameras/social-distancing/daily""" - """ - Returns a daily report (for the date range specified) with information about the - social distancing infractions detected in the cameras. - """ - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0, 0, 148, 179], 'no_infringement': [0, 0, 136, 139], - 'low_infringement': [0, 0, 0, 19], 'high_infringement': [0, 0, 5, 17], - 'critical_infringement': [0, 0, 7, 4], 'dates': ['2020-09-20', '2020-09-21', '2020-09-22', '2020-09-23'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 18, 18], 'face_with_mask': [0, 0, 106, 135], 'face_without_mask': [0, 0, 26, 30], - 'dates': ['2020-09-20', '2020-09-21', '2020-09-22', '2020-09-23']}) - ] - ) - def test_get_a_daily_report_properly(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - to_date = "2020-09-23" - from_date = "2020-09-20" - - response = client.get( - f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0], 'no_infringement': [0], 'low_infringement': [0], 'high_infringement': [0], - 'critical_infringement': [0], 'dates': ['2020-09-20'] - }), - ("face-mask-detections", { - 'no_face': [0], 'face_with_mask': [0], 'face_without_mask': [0], 'dates': ['2020-09-20']}) - ] - ) - def test_get_a_daily_report_properly_one_day(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - date = "2020-09-20" - - response = client.get( - f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date={date}&to_date={date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [104, 120, 161, 301], 'no_infringement': [5, 35, 143, 183], - 'low_infringement': [57, 42, 2, 87], 'high_infringement': [42, 43, 9, 27], - 'critical_infringement': [0, 0, 7, 4], 'dates': ['2020-09-20', '2020-09-21', '2020-09-22', '2020-09-23'] - }), - ("face-mask-detections", { - 'no_face': [85, 77, 114, 41], 'face_with_mask': [36, 76, 188, 170], - 'face_without_mask': [23, 
33, 39, 128], - 'dates': ['2020-09-20', '2020-09-21', '2020-09-22', '2020-09-23'] - }) - ] - ) - def test_get_a_daily_report_properly_two_cameras(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - camera_id_2 = camera_2["id"] - to_date = "2020-09-23" - from_date = "2020-09-20" - - response = client.get( - f"/metrics/cameras/{metric}/daily?cameras={camera_id},{camera_id_2}&from_date={from_date}&to_date={to_date}" - ) - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", {'detail': "Camera with id 'BAD_ID' does not exist"}), - ("face-mask-detections", {'detail': "Camera with id 'BAD_ID' does not exist"}) - ] - ) - def test_try_get_a_daily_report_bad_id(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = 'BAD_ID' - - response = client.get( - f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date=2020-09-20&to_date=2020-09-23") - - assert response.status_code == 404 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["social-distancing", "face-mask-detections"] - ) - def test_try_get_a_daily_report_bad_dates(self, config_rollback_cameras, metric): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - from_date = "BAD_DATE" - to_date = "BAD_DATE" - - response = client.get( - f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'no_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'high_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'dates': ['2003-05-18', '2003-05-19', '2003-05-20', '2003-05-21', '2003-05-22', '2003-05-23', - '2003-05-24', '2003-05-25', '2003-05-26', '2003-05-27', '2003-05-28'] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'face_without_mask': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'dates': ['2003-05-18', '2003-05-19', '2003-05-20', '2003-05-21', '2003-05-22', '2003-05-23', - '2003-05-24', '2003-05-25', '2003-05-26', '2003-05-27', '2003-05-28'] - }) - ] - ) - def test_try_get_a_daily_report_no_reports_for_dates(self, config_rollback_cameras, metric, expected): - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - from_date = "2003-05-18" - to_date = "2003-05-28" - - response = client.get( - f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric", - ["social-distancing", "face-mask-detections"] - ) - def test_try_get_a_daily_report_wrong_dates(self, config_rollback_cameras, metric): - """from_date doesn't come before to_date""" - - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - from_date = "2020-09-20" - to_date = "2020-09-10" - - response = client.get( - 
f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 400 - - @pytest.mark.parametrize( - "metric", - ["social-distancing", "face-mask-detections"] - ) - def test_try_get_a_daily_report_only_from_date(self, config_rollback_cameras, metric): - """ Note that here as we do not send to_date, default value will take place, and to_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "2021-01-10" - "today". - """ - - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - from_date = "2021-01-10" - - response = client.get(f"/metrics/cameras/{metric}/daily?cameras={camera_id}&from_date={from_date}") - - assert response.status_code == 200 - - @pytest.mark.parametrize( - "metric", - ["social-distancing", "face-mask-detections"] - ) - def test_try_get_a_daily_report_only_to_date(self, config_rollback_cameras, metric): - """ Note that here as we do not send from_date, default value will take place, and from_date will be - date.today(). - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "date.today() - timedelta(days=3)" - "2020-09-20" and - this date range is probably wrong because from_date will be later than to_date. - """ - - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - to_date = "2020-09-20" - - response = client.get(f"/metrics/cameras/{metric}/daily?cameras={camera_id}&to_date={to_date}") - - assert response.status_code == 400 - - -# pytest -v api/tests/app/test_camera_metrics.py::TestsGetCameraDistancingWeeklyReport -class TestsGetCameraDistancingWeeklyReport: - """ Get Camera Distancing Weekly Report , GET /metrics/cameras/social-distancing/weekly """ - """ - Returns a weekly report (for the date range specified) with information about the social distancing infractions - detected in the cameras. - - If weeks is provided and is a positive number: - - from_date and to_date are ignored. - Report spans from weeks*7 + 1 days ago to yesterday. - Taking yesterday as the end of week. - - Else: - - Report spans from from_Date to to_date. - Taking Sunday as the end of week - """ - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0, 327], - 'no_infringement': [0, 275], - 'low_infringement': [0, 19], - 'high_infringement': [0, 22], - 'critical_infringement': [0, 11], - 'weeks': ['2020-09-20 2020-09-20', '2020-09-21 2020-09-23'] - }), - ("face-mask-detections", { - 'no_face': [0, 36], 'face_with_mask': [0, 241], 'face_without_mask': [0, 56], - 'weeks': ['2020-09-20 2020-09-20', '2020-09-21 2020-09-23'] - }) - ] - ) - def test_get_a_weekly_report_properly(self, config_rollback_cameras, metric, expected): - """ - Given date range spans two weeks. 
- Week 1: 2020-9-14 2020-9-20 - Week 2: 2020-9-21 2020-9-27 - """ - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - from_date = "2020-09-20" - to_date = "2020-09-23" - - response = client.get( - f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [714], 'no_infringement': [555], 'low_infringement': [73], - 'high_infringement': [55], - 'critical_infringement': [30], 'weeks': ['2020-09-21 2020-09-27'] - }), - ("face-mask-detections", { - 'no_face': [85], 'face_with_mask': [519], 'face_without_mask': [171], - 'weeks': ['2020-09-21 2020-09-27'] - }) - ] - ) - def test_get_a_weekly_report_properly_II(self, config_rollback_cameras, metric, - expected): - """ - Given date range spans only one whole week. - Week 1: 2020-9-21 2020-9-27 - """ - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - from_date = "2020-09-21" - to_date = "2020-09-27" - - response = client.get( - f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}&to_date={to_date}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [535, 754, 714, 714], 'no_infringement': [416, 622, 555, 555], - 'low_infringement': [54, 59, 73, 73], 'high_infringement': [38, 56, 55, 55], - 'critical_infringement': [26, 19, 30, 30], - 'weeks': ['2020-09-02 2020-09-08', '2020-09-09 2020-09-15', '2020-09-16 2020-09-22', - '2020-09-23 2020-09-29'] - }), - ("face-mask-detections", { - 'no_face': [88, 85, 106, 85], 'face_with_mask': [310, 519, 445, 519], - 'face_without_mask': [150, 171, 180, 171], - 'weeks': ['2020-09-02 2020-09-08', '2020-09-09 2020-09-15', '2020-09-16 2020-09-22', - '2020-09-23 2020-09-29'] - }) - ] - ) - @freeze_time("2020-09-30") - def test_get_a_weekly_report_properly_weeks_value(self, config_rollback_cameras, - metric, expected): - """ - Here we mock datetime.date.today() to a more convenient date set in @freeze_time("2020-09-30") - """ - camera, camera_2, client, config_sample_path = config_rollback_cameras - camera_id = camera["id"] - weeks = 4 - - response = client.get( - f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&weeks={weeks}") - - assert response.status_code == 200 - assert response.json() == expected - - @pytest.mark.parametrize( - "metric,expected", - [ - ("social-distancing", { - 'detected_objects': [0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0], - 'low_infringement': [0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0], - 'critical_infringement': [0, 0, 0, 0, 0] - }), - ("face-mask-detections", { - 'no_face': [0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0], 'face_without_mask': [0, 0, 0, 0, 0] - }) - ] - ) - def test_get_a_weekly_report_no_dates_or_week_values(self, config_rollback_cameras, metric, expected): - """ - WARNING: We could not mock the date.today() when the function is called within default query parameters. - So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(), - weeks=4)" - "date.today()" and this date range (4 weeks ago from today) should never have values for any - camera in order to pass the test. 
Moreover, we do not assert response.json()["weeks"] because it will change
-        depending on the date.
-        """
-
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}")
-
-        assert response.status_code == 200
-        for key in expected:
-            assert response.json()[key] == expected[key]
-
-    @pytest.mark.parametrize(
-        "metric",
-        ["social-distancing", "face-mask-detections"]
-    )
-    @freeze_time("2020-09-30")
-    def test_try_get_a_weekly_report_properly_weeks_value_wrong(self, config_rollback_cameras, metric):
-        """
-        Here we mock datetime.date.today() to a more convenient date set in @freeze_time("2020-09-30")
-        """
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        weeks = "WRONG"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&weeks={weeks}")
-
-        assert response.status_code == 400
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {
-                'detected_objects': [535, 754, 714, 714], 'no_infringement': [416, 622, 555, 555],
-                'low_infringement': [54, 59, 73, 73], 'high_infringement': [38, 56, 55, 55],
-                'critical_infringement': [26, 19, 30, 30],
-                'weeks': ['2020-09-02 2020-09-08', '2020-09-09 2020-09-15', '2020-09-16 2020-09-22',
-                          '2020-09-23 2020-09-29']
-            }),
-            ("face-mask-detections", {
-                'no_face': [88, 85, 106, 85], 'face_with_mask': [310, 519, 445, 519],
-                'face_without_mask': [150, 171, 180, 171],
-                'weeks': ['2020-09-02 2020-09-08', '2020-09-09 2020-09-15', '2020-09-16 2020-09-22',
-                          '2020-09-23 2020-09-29']
-            })
-        ]
-    )
-    @freeze_time("2020-09-30")
-    def test_get_a_weekly_report_properly_weeks_value_and_dates(self, config_rollback_cameras, metric, expected):
-        """
-        Here we mock datetime.date.today() to a more convenient date set in @freeze_time("2020-09-30")
-        In addition, query string weeks is given, but also from_date and to_date. So dates should be ignored.
-        """
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        weeks = 4
-        from_date = "2020-09-21"
-        to_date = "2020-09-27"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&weeks={weeks}&from_date={from_date}&"
-            f"to_date={to_date}")
-
-        assert response.status_code == 200
-        assert response.json() == expected
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {'detail': "Camera with id 'BAD_ID' does not exist"}),
-            ("face-mask-detections", {'detail': "Camera with id 'BAD_ID' does not exist"})
-        ]
-    )
-    def test_try_get_a_weekly_report_bad_id(self, config_rollback_cameras, metric, expected):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = 'BAD_ID'
-        from_date = "2020-09-20"
-        to_date = "2020-09-23"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}&to_date={to_date}")
-
-        assert response.status_code == 404
-        assert response.json() == expected
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {
-                'detected_objects': [0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0],
-                'low_infringement': [0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0],
-                'critical_infringement': [0, 0, 0, 0, 0]
-            }),
-            ("face-mask-detections", {
-                'no_face': [0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0], 'face_without_mask': [0, 0, 0, 0, 0]
-            })
-        ]
-    )
-    def test_get_a_weekly_report_no_query_string(self, config_rollback_cameras,
-                                                 metric, expected):
-        """
-        If no camera is provided, it will search all IDs for each existing camera.
-
-        WARNING: We could not mock the date.today() when the function is called within default query parameters.
-        So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(),
-        weeks=4)" - "date.today()" and this date range (4 weeks ago from today) should never have values for any
-        camera in order to pass the test. Moreover, we do not assert response.json()["weeks"] because it will change
-        depending on the date.
-        """
-
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly")
-
-        assert response.status_code == 200
-        for key in expected:
-            assert response.json()[key] == expected[key]
-
-    @pytest.mark.parametrize(
-        "metric",
-        ["social-distancing", "face-mask-detections"]
-    )
-    def test_try_get_a_weekly_report_bad_dates_format(self, config_rollback_cameras, metric):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        from_date = "BAD_DATE"
-        to_date = "BAD_DATE"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}&to_date={to_date}")
-
-        assert response.status_code == 400
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {
-                'detected_objects': [0, 0, 0, 0, 0, 0], 'no_infringement': [0, 0, 0, 0, 0, 0],
-                'low_infringement': [0, 0, 0, 0, 0, 0], 'high_infringement': [0, 0, 0, 0, 0, 0],
-                'critical_infringement': [0, 0, 0, 0, 0, 0],
-                'weeks': ['2012-04-12 2012-04-15', '2012-04-16 2012-04-22', '2012-04-23 2012-04-29',
-                          '2012-04-30 2012-05-06', '2012-05-07 2012-05-13', '2012-05-14 2012-05-18']
-            }),
-            ("face-mask-detections", {
-                'no_face': [0, 0, 0, 0, 0, 0], 'face_with_mask': [0, 0, 0, 0, 0, 0],
-                'face_without_mask': [0, 0, 0, 0, 0, 0],
-                'weeks': ['2012-04-12 2012-04-15', '2012-04-16 2012-04-22', '2012-04-23 2012-04-29',
-                          '2012-04-30 2012-05-06', '2012-05-07 2012-05-13', '2012-05-14 2012-05-18']
-            })
-        ]
-    )
-    def test_try_get_a_weekly_report_non_existent_dates(self, config_rollback_cameras,
-                                                        metric, expected):
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        from_date = "2012-04-12"
-        to_date = "2012-05-18"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}&to_date={to_date}")
-
-        assert response.status_code == 200
-        assert response.json() == expected
-
-    @pytest.mark.parametrize(
-        "metric",
-        ["social-distancing", "face-mask-detections"]
-    )
-    def test_try_get_a_weekly_report_invalid_range_of_dates(self, config_rollback_cameras,
-                                                            metric):
-        """from_date is after to_date"""
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        from_date = "2020-09-25"
-        to_date = "2020-09-18"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}&to_date={to_date}")
-
-        assert response.status_code == 400
-
-    @pytest.mark.parametrize(
-        "metric,expected",
-        [
-            ("social-distancing", {
-                'detected_objects': [104, 582], 'no_infringement': [5, 361], 'low_infringement': [57, 131],
-                'high_infringement': [42, 79], 'critical_infringement': [0, 11],
-                'weeks': ['2020-09-20 2020-09-20', '2020-09-21 2020-09-23']
-            }),
-            ("face-mask-detections", {
-                'no_face': [85, 232], 'face_with_mask': [36, 434], 'face_without_mask': [23, 200],
-                'weeks': ['2020-09-20 2020-09-20', '2020-09-21 2020-09-23']
-            })
-        ]
-    )
-    def test_try_get_a_weekly_report_no_id(self, config_rollback_cameras, metric, expected):
-        """
-        If no camera is provided, it will search all IDs for each existing camera.
-        No problem because we are mocking the date and we have control over every existing camera, so the test
-        is not broken.
-        Our existing cameras are the ones that appeared in the config file of 'config_rollback_cameras' -> the ones
-        from 'config-x86-openvino_MAIN' -> the ones with ids 49, 50 (cameras with ids 51 and 52 appear in another
-        config file, so they do not come into play here)
-        """
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        from_date = "2020-09-20"
-        to_date = "2020-09-23"
-
-        response = client.get(
-            f"/metrics/cameras/{metric}/weekly?from_date={from_date}&to_date={to_date}")
-
-        assert response.status_code == 200
-        assert response.json() == expected
-
-    @pytest.mark.parametrize(
-        "metric",
-        ["social-distancing", "face-mask-detections"]
-    )
-    def test_try_get_a_weekly_report_only_from_date(self, config_rollback_cameras, metric):
-        """
-        Note that since we do not send to_date, the default value applies, and to_date will be
-        date.today().
-        WARNING: We could not mock the date.today() when the function is called within default query parameters.
-        So, we must be careful because the data range will be: "2021-01-10" - "today".
-        """
-
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        from_date = "2021-01-10"
-
-        response = client.get(f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&from_date={from_date}")
-
-        assert response.status_code == 200
-
-    @pytest.mark.parametrize(
-        "metric",
-        ["social-distancing", "face-mask-detections"]
-    )
-    def test_try_get_a_weekly_report_only_to_date(self, config_rollback_cameras, metric):
-        """
-        Note that since we do not send from_date, the default value applies, and from_date will be
-        date.today().
-        WARNING: We could not mock the date.today() when the function is called within default query parameters.
-        So, we must be careful because the data range will be: "date.today() - timedelta(days=date.today().weekday(),
-        weeks=4)" - "2020-09-20" and this date range is probably wrong because from_date will be later than to_date.
-        """
-
-        camera, camera_2, client, config_sample_path = config_rollback_cameras
-        camera_id = camera["id"]
-        to_date = "2020-09-20"
-
-        response = client.get(f"/metrics/cameras/{metric}/weekly?cameras={camera_id}&to_date={to_date}")
-
-        assert response.status_code == 400
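Several docstrings above warn that date.today() cannot be mocked "when the function is called within default query parameters". The underlying Python behaviour: a default argument such as Query(date.today()) is evaluated once, when the route function is defined at import time, so a clock frozen afterwards (e.g. with freezegun) never reaches that call. A plain-Python sketch of the same pitfall:

    import datetime

    def report(day=datetime.date.today()):  # default captured once, at definition time
        return day

    # Freezing or advancing the clock later does not change report():
    # the default still holds the date captured when the module was imported.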
diff --git a/api/tests/data/config-x86-openvino_EMPTY.ini b/api/tests/data/config-x86-openvino_EMPTY.ini
index 0ab0c338..6b8e5962 100644
--- a/api/tests/data/config-x86-openvino_EMPTY.ini
+++ b/api/tests/data/config-x86-openvino_EMPTY.ini
@@ -95,15 +95,8 @@ Authorization =
 TimeInterval = 0.5
 Enabled = False
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = False
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini b/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini
index 9dd538c7..2981e3be 100644
--- a/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini
+++ b/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini
@@ -95,15 +95,8 @@ Authorization =
 TimeInterval = 0.5
 Enabled = False
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = False
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/api/tests/data/config-x86-openvino_METRICS.ini b/api/tests/data/config-x86-openvino_METRICS.ini
index 6c4c2cc4..7fc49c98 100644
--- a/api/tests/data/config-x86-openvino_METRICS.ini
+++ b/api/tests/data/config-x86-openvino_METRICS.ini
@@ -98,11 +98,6 @@ TimeInterval = 0.5
 Enabled = False
 
 [PeriodicTask_0]
-Name = metrics
-Enabled = False
-LiveInterval = 10
-
-[PeriodicTask_1]
 Name = s3_backup
 Enabled = False
 BackupInterval = 30
diff --git a/config-coral.ini b/config-coral.ini
index b511a836..dbd52219 100644
--- a/config-coral.ini
+++ b/config-coral.ini
@@ -127,15 +127,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/config-jetson-nano.ini b/config-jetson-nano.ini
index 8a7146f2..b4172432 100644
--- a/config-jetson-nano.ini
+++ b/config-jetson-nano.ini
@@ -118,15 +118,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/config-jetson-tx2.ini b/config-jetson-tx2.ini
index 2c62cc2f..973dfea2 100644
--- a/config-jetson-tx2.ini
+++ b/config-jetson-tx2.ini
@@ -128,15 +128,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/config-x86-gpu-tensorrt.ini b/config-x86-gpu-tensorrt.ini
index 36e36565..77afba79 100644
--- a/config-x86-gpu-tensorrt.ini
+++ b/config-x86-gpu-tensorrt.ini
@@ -130,15 +130,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/config-x86-gpu.ini b/config-x86-gpu.ini
index 44274a7e..e5ca863d 100644
--- a/config-x86-gpu.ini
+++ b/config-x86-gpu.ini
@@ -127,15 +127,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini
index 7fe822e3..027c6068 100644
--- a/config-x86-openvino.ini
+++ b/config-x86-openvino.ini
@@ -115,15 +115,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/config-x86.ini b/config-x86.ini
index 9e9ea4e8..002a2fb0 100644
--- a/config-x86.ini
+++ b/config-x86.ini
@@ -130,15 +130,8 @@ TimeInterval = 0.5
 Enabled = False
 SendingInterval = 5
 
-; Enable the PeriodicTask_0 if you want to generate metrics
-[PeriodicTask_0]
-Name = metrics
-Enabled = True
-; Expressed in minutes
-LiveInterval = 10
-
-; Enable the PeriodicTask_1 if you want to backup your files in S3
-[PeriodicTask_1]
+; Enable the PeriodicTask_0 if you want to backup your files in S3
+[PeriodicTask_0]
 Name = s3_backup
 Enabled = False
 ; Expressed in minutes
diff --git a/constants.py b/constants.py
index 2166a806..f1dceca1 100644
--- a/constants.py
+++ b/constants.py
@@ -3,9 +3,3 @@
 
 # Entities
 CAMERAS = "cameras"
-
-# Metrics
-OCCUPANCY = "occupancy"
-SOCIAL_DISTANCING = "social-distancing"
-FACEMASK_USAGE = "facemask-usage"
-IN_OUT = "in-out"
-DWELL_TIME = "dwell-time"
diff --git a/libs/backups/s3_backup.py b/libs/backups/s3_backup.py
index bd9ebd79..54c8ff8e 100644
--- a/libs/backups/s3_backup.py
+++ b/libs/backups/s3_backup.py
@@ -1,9 +1,8 @@
 import os
-from datetime import date, timedelta
+from datetime import date
 
 from libs.config_engine import ConfigEngine
-from libs.metrics import FaceMaskUsageMetric, SocialDistancingMetric, DwellTimeMetric, InOutMetric
 from libs.uploaders.s3_uploader import S3Uploader
 from libs.utils.loggers import get_source_log_directory
 
@@ -24,26 +23,3 @@ def raw_data_backup(config: ConfigEngine, bucket_name: str):
     if os.path.isfile(today_objects_csv):
         # Upload the today object files to S3
         s3_uploader.upload_file(bucket_name, today_objects_csv, f"{str(date.today())}.csv", bucket_prefix)
-
-def reports_backup(config: ConfigEngine, bucket_name: str):
-    """
-    Uploads to S3 the reports generated yesterday by the cameras.
-    """
-    s3_uploader = S3Uploader()
-    sources = config.get_video_sources()
-    source_log_directory = get_source_log_directory(config)
-    yesterday = str(date.today() - timedelta(days=1))
-    # Back up yesterday's reports for each source
-    for src in sources:
-        source_directory = os.path.join(source_log_directory, src["id"])
-        reports_directory = os.path.join(source_directory, "reports")
-        source_metrics = [FaceMaskUsageMetric, SocialDistancingMetric, DwellTimeMetric, InOutMetric]
-        for metric in source_metrics:
-            metric_folder = os.path.join(reports_directory, metric.reports_folder)
-            metric_hourly_report = os.path.join(metric_folder, f"report_{yesterday}.csv")
-            metric_daily_report = os.path.join(metric_folder, "report.csv")
-            bucket_prefix = f"sources/{src['id']}/reports/{metric.reports_folder}"
-            if os.path.isfile(metric_hourly_report):
-                s3_uploader.upload_file(bucket_name, metric_hourly_report, f"report_{yesterday}.csv", bucket_prefix)
-            if os.path.isfile(metric_daily_report):
-                s3_uploader.upload_file(bucket_name, metric_daily_report, "report.csv", bucket_prefix)
\ No newline at end of file
diff --git a/libs/metrics/__init__.py b/libs/metrics/__init__.py
deleted file mode 100644
index 9eaa907c..00000000
--- a/libs/metrics/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .social_distancing import SocialDistancingMetric  # noqa
-from .face_mask_usage import FaceMaskUsageMetric  # noqa
-from .in_out import InOutMetric  # noqa
-from .dwell_time import DwellTimeMetric  # noqa
diff --git a/libs/metrics/base.py b/libs/metrics/base.py
deleted file mode 100644
index 9a6b26dc..00000000
--- a/libs/metrics/base.py
+++ /dev/null
@@ -1,477 +0,0 @@
-import ast
-import cv2 as cv
-import os
-import copy
-import csv
-import numpy as np
-import pandas as pd
-import logging
-import numbers
-from enum import Enum
-
-from collections import deque
-from datetime import date, datetime, timedelta, time
-from typing import Dict, List, Iterator
-from pandas.api.types import is_numeric_dtype
-
-from libs.utils.config import get_source_config_directory
-from libs.utils.loggers import get_source_log_directory, get_source_logging_interval
-from libs.utils.utils import is_list_recursively_empty, validate_file_exists_and_is_not_empty
-
-logger = logging.getLogger(__name__)
-
-
-class AggregationMode(Enum):
-    SINGLE = 1
-    BATCH = 2
-
-
-def parse_date_range(dates):
-    """Generator. From a continuous sorted list of datetime64 yields tuples (start_date, end_date)
-    for each week encompassed."""
-    while not dates.empty:
-        start = 0
-        end = (7 - dates[start].weekday()) - 1
-        if end >= len(dates):
-            end = len(dates) - 1
-
-        yield (dates[start], dates[end])
-        dates = dates[end+1:]
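parse_date_range drives the weekly grouping of the reports. A small usage sketch (assuming a pandas DatetimeIndex, which supports .empty, positional indexing, and slicing as the function requires; the import path is the file deleted above, as it existed before this patch):

    import pandas as pd

    # from libs.metrics.base import parse_date_range  # pre-patch location

    dates = pd.date_range("2020-09-20", "2020-09-23")  # Sunday through Wednesday
    print(list(parse_date_range(dates)))
    # [(2020-09-20, 2020-09-20), (2020-09-21, 2020-09-23)]
    # i.e. a partial week ending Sunday, then the remainder of the range,
    # matching weeks like '2020-09-20 2020-09-20', '2020-09-21 2020-09-23' in the tests.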
-
-
-class BaseMetric:
-    processing_count_threshold = 3
-    reports_folder = None
-    csv_headers = []
-    csv_default_values = []
-    entity = "source"
-    # Use the `live_csv_headers` when the csv structure differs from the hourly/daily
-    live_csv_headers = []
-    # Values ignored when returning reports
-    ignored_headers = []
-
-    @classmethod
-    def report_headers(cls):
-        return [h for h in cls.csv_headers if h not in cls.ignored_headers]
-
-    @classmethod
-    def get_entity_base_directory(cls, config=None):
-        if cls.entity != "source":
-            raise NotImplementedError
-        if config:
-            return get_source_log_directory(config)
-        return os.getenv("SourceLogDirectory")
-
-    @classmethod
-    def get_roi_file_path(cls, camera_id, config):
-        """ Returns the path to the roi_contour file """
-        return f"{get_source_config_directory(config)}/{camera_id}/roi_filtering/roi_contour.csv"
-
-    @classmethod
-    def get_roi_contour(cls, roi_file_path):
-        """ Given the path to the roi file it loads it and returns it """
-        if validate_file_exists_and_is_not_empty(roi_file_path):
-            return np.loadtxt(roi_file_path, delimiter=',', dtype=int)
-        else:
-            return None
-
-    @classmethod
-    def get_roi_contour_for_entity(cls, config, source_id):
-        if cls.entity == "area":
-            raise NotImplementedError
-        return cls.get_roi_contour(cls.get_roi_file_path(source_id, config))
-
-    @staticmethod
-    def is_inside_roi(detected_object, roi_contour):
-        """
-        An object is inside the RoI if its middle bottom point lies inside it.
-        params:
-            detected_object: a dictionary, that has attributes of a detected object such as "id",
-            "centroid" (a tuple of the normalized centroid coordinates (cx,cy,w,h) of the box),
-            "bbox" (a tuple of the normalized (xmin,ymin,xmax,ymax) coordinate of the box),
-            "centroidReal" (a tuple of the centroid coordinates (cx,cy,w,h) of the box) and
-            "bbox_real" (a tuple of the (xmin,ymin,xmax,ymax) coordinate of the box)
-
-            roi_contour: An array of 2-tuples that compose the contour of the RoI
-        returns:
-            True or False, depending on whether the object's coordinates are inside the RoI
-        """
-        corners = detected_object["bbox_real"]
-        x1, x2 = int(corners[0]), int(corners[2])
-        y1, y2 = int(corners[1]), int(corners[3])  # noqa
-        if cv.pointPolygonTest(roi_contour, (x1 + (x2-x1)/2, y2), False) >= 0:
-            return True
-        return False
-
-    @classmethod
-    def ignore_objects_outside_roi(cls, csv_row, roi_contour):
-        detections = ast.literal_eval(csv_row["Detections"])
-        detections_in_roi = []
-        for index, obj in enumerate(detections):
-            obj["index"] = index
-            if cls.is_inside_roi(obj, roi_contour):
-                detections_in_roi.append(obj)
-        violations_indexes = ast.literal_eval(csv_row["ViolationsIndexes"])
-        violations_indexes_in_roi = []
-        for index, obj in enumerate(detections_in_roi):
-            if obj["index"] in violations_indexes:
-                violations_indexes_in_roi.append(index)
-        # Update the csv fields
-        csv_row["Detections"] = str(detections_in_roi)
-        csv_row["ViolationsIndexes"] = str(violations_indexes_in_roi)
-        csv_row["DetectedObjects"] = len(detections_in_roi)
-        csv_row["ViolatingObjects"] = len(violations_indexes_in_roi)
-        return csv_row
-
-    @classmethod
-    def get_entities(cls, config):
-        if cls.entity != "source":
-            raise NotImplementedError
-        return config.get_video_sources()
-
-    
@classmethod - def process_metric_csv_row(cls, csv_row, object_logs): - """ - Extracts from the `csv_row` the required information to calculate the metric. - The extracted information is populated into `object_logs`. - """ - raise NotImplementedError - - @classmethod - def process_csv_row(cls, csv_row, object_logs, roi_contour=None): - if roi_contour is not None: - csv_row = cls.ignore_objects_outside_roi(csv_row, roi_contour) - cls.process_metric_csv_row(csv_row, object_logs) - - @classmethod - def generate_hourly_metric_data(cls, config, object_logs, entity): - """ - Generates the hourly reports for the hours received in `object_logs`. - """ - raise NotImplementedError - - @classmethod - def generate_hourly_csv_data(cls, config, entity: Dict, entity_file: str, time_from: datetime, - time_until: datetime): - roi_contour = cls.get_roi_contour_for_entity(config, entity["id"]) - if not os.path.isfile(entity_file): - if not cls.entity: - raise NotImplementedError - entity_type = "Camera" - logger.warn(f"The [{entity_type}: {entity['id']}] contains no recorded data for that day") - return - objects_logs = {} - for hour in range(time_from.hour, time_until.hour): - objects_logs[hour] = {} - with open(entity_file, newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - row_time = datetime.strptime(row["Timestamp"], "%Y-%m-%d %H:%M:%S") - if time_from <= row_time < time_until: - cls.process_csv_row(row, objects_logs, roi_contour) - return cls.generate_hourly_metric_data(config, objects_logs, entity) - - @classmethod - def compute_hourly_metrics(cls, config): - if not cls.reports_folder: - raise Exception(f"The metric {cls} doesn't have configured the folder parameter") - entities = cls.get_entities(config) - current_hour = datetime.now().hour - for entity in entities: - if not cls.can_execute(config, entity): - continue - entity_directory = entity.base_directory - log_directory = None - if cls.entity == "source": - log_directory = os.path.join(entity_directory, "objects_log") - else: - raise NotImplementedError - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - # Create missing directories - os.makedirs(log_directory, exist_ok=True) - os.makedirs(reports_directory, exist_ok=True) - time_until = datetime.combine(date.today(), time(current_hour, 0)) - report_date = cls.get_report_date() - entity_csv = os.path.join(log_directory, str(report_date) + ".csv") - daily_csv = os.path.join(reports_directory, "report_" + str(report_date) + ".csv") - - time_from = datetime.combine(report_date, time(0, 0)) - if os.path.isfile(daily_csv): - with open(daily_csv, "r", newline='') as csvfile: - processed_hours = sum(1 for line in csv.reader(csvfile)) - 1 - time_from = datetime.combine(report_date, time(processed_hours + 1, 0)) - else: - with open(daily_csv, "a", newline='') as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=cls.csv_headers) - writer.writeheader() - csv_data = cls.generate_hourly_csv_data(config, entity, entity_csv, time_from, time_until) - if csv_data is None: - if not cls.entity: - raise NotImplementedError - entity_type = "Camera" - logger.warn(f"Hourly report not generated! 
[{entity_type}: {entity['id']}]") - continue - with open(daily_csv, "a", newline='') as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=cls.csv_headers) - for item in csv_data: - row = {} - for index, header in enumerate(cls.csv_headers): - row[header] = item[index] - writer.writerow(row) - - @classmethod - def generate_daily_csv_data(cls, yesterday_hourly_file): - """ - Generates the daily report for the `yesterday_hourly_file` received. - """ - raise NotImplementedError - - @classmethod - def compute_daily_metrics(cls, config): - base_directory = cls.get_entity_base_directory(config) - entities = cls.get_entities(config) - for entity in entities: - if not cls.can_execute(config, entity): - continue - entity_directory = os.path.join(base_directory, entity["id"]) - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - # Create missing directories - os.makedirs(reports_directory, exist_ok=True) - yesterday = str(date.today() - timedelta(days=1)) - hourly_csv = os.path.join(reports_directory, "report_" + yesterday + ".csv") - report_csv = os.path.join(reports_directory, "report.csv") - if not os.path.isfile(hourly_csv): - if not cls.entity: - raise NotImplementedError - entity_type = "Camera" - logger.warn(f"Daily report for date {str(yesterday)} not generated! [{entity_type}: {entity['id']}]") - continue - daily_data = cls.generate_daily_csv_data(hourly_csv) - headers = ["Date"] + cls.csv_headers - report_file_exists = os.path.isfile(report_csv) - with open(report_csv, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=headers) - - if not report_file_exists: - writer.writeheader() - row = {"Date": yesterday} - for index, header in enumerate(cls.csv_headers): - row[header] = daily_data[index] - writer.writerow(row) - - @classmethod - def generate_live_csv_data(cls, config, today_entity_csv, entity, entries_in_interval): - """ - Generates the live report using the `today_entity_csv` file received. - """ - raise NotImplementedError - - @classmethod - def compute_live_metrics(cls, config, live_interval): - base_directory = cls.get_entity_base_directory(config) - entities = cls.get_entities(config) - for entity in entities: - if not cls.can_execute(config, entity): - continue - entity_directory = os.path.join(base_directory, entity["id"]) - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - # Create missing directories - os.makedirs(reports_directory, exist_ok=True) - log_directory = None - if cls.entity == "source": - log_directory = os.path.join(entity_directory, "objects_log") - else: - raise NotImplementedError - today_entity_csv = os.path.join(log_directory, str(date.today()) + ".csv") - live_report_csv = os.path.join(reports_directory, "live.csv") - csv_headers = cls.live_csv_headers if cls.live_csv_headers else cls.csv_headers - headers = ["Time"] + csv_headers - report_file_exists = os.path.isfile(live_report_csv) - if not os.path.isfile(today_entity_csv): - return - entries_in_interval = int(live_interval * 60 / get_source_logging_interval(config)) - live_data = cls.generate_live_csv_data(config, today_entity_csv, entity, entries_in_interval) - assert len(live_data) == len(csv_headers), "Row element count not the same as header count!!" 
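Every writer in this class follows the same append-or-create CSV pattern: open the report in append mode, emit the header row only when the file did not exist, then add one timestamped row. A standalone sketch of that pattern, with a placeholder file name and headers rather than the processor's real report paths:

    import csv
    import os
    from datetime import datetime

    def append_report_row(path, headers, values):
        file_exists = os.path.isfile(path)
        with open(path, "a", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["Time"] + headers)
            if not file_exists:
                writer.writeheader()  # only on first creation
            row = {"Time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
            row.update(dict(zip(headers, values)))
            writer.writerow(row)

    append_report_row("live.csv", ["DetectedObjects"], [5])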
- with open(live_report_csv, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=headers) - if not report_file_exists: - writer.writeheader() - row = {"Time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")} - for index, header in enumerate(csv_headers): - row[header] = live_data[index] - writer.writerow(row) - - @classmethod - def get_hourly_report(cls, entities: List[str], report_date: date) -> Dict: - base_directory = cls.get_entity_base_directory() - hours = list(range(0, 24)) - results = {} - hourly_headers = cls.report_headers() - for index, header in enumerate(hourly_headers): - if cls.csv_default_values[index] == 0: - results[header] = np.zeros(24) - else: - results[header] = [] - for entity in entities: - entity_directory = os.path.join(base_directory, entity) - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - file_path = os.path.join(reports_directory, f"report_{report_date}.csv") - if os.path.exists(file_path): - df = pd.read_csv(file_path) - for header in hourly_headers: - if is_numeric_dtype(df[header]): - results[header] += np.pad( - df[header].to_numpy(), (0, 24 - df[header].to_numpy().size), mode="constant" - ) - else: # It's a list - values = df[header].apply(ast.literal_eval).tolist() - entry = np.pad(values, 0, mode="constant").tolist() - if is_list_recursively_empty(results[header]): - results[header] = entry - else: - results[header] = [[c + d for c, d in zip(a, b)] for a, b in zip(results[header], entry)] - for metric in results: - results[metric] = list(results[metric]) - results["Hours"] = hours - return results - - @classmethod - def get_daily_report(cls, entities: List[str], from_date: date, to_date: date) -> Dict: - base_directory = cls.get_entity_base_directory() - date_range = pd.date_range(start=from_date, end=to_date) - base_results = {} - daily_headers = cls.report_headers() - for key in date_range: - base_results[key.strftime('%Y-%m-%d')] = {} - for index, header in enumerate(cls.csv_headers): - base_results[key.strftime('%Y-%m-%d')][header] = copy.deepcopy(cls.csv_default_values[index]) - - for entity in entities: - entity_directory = os.path.join(base_directory, entity) - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - file_path = os.path.join(reports_directory, "report.csv") - if not os.path.isfile(file_path): - continue - df = pd.read_csv(file_path) - df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d') - mask = (df['Date'] >= pd.to_datetime(from_date)) & (df['Date'] <= pd.to_datetime(to_date)) - entity_report = df.loc[mask] - entity_report['Date'] = entity_report['Date'].apply(lambda x: x.strftime('%Y-%m-%d')) - entity_report = entity_report.set_index('Date').T - entity_report_dict = entity_report.to_dict() - for key in entity_report_dict: - for header in daily_headers: - if isinstance(entity_report_dict[key][header], numbers.Number): - base_results[key][header] += entity_report_dict[key][header] - else: # It's a list - entry = ast.literal_eval(entity_report_dict[key][header]) - if is_list_recursively_empty(base_results[key][header]): - base_results[key][header] = entry - else: - base_results[key][header] = [a + b for a, b in zip(base_results[key][header], entry)] - - report = {"Dates": []} - for header in daily_headers: - report[header] = [] - for report_date in sorted(base_results): - report["Dates"].append(report_date) - for header in daily_headers: - report[header].append(base_results[report_date][header]) - return report - - @classmethod - def 
generate_weekly_report_data(cls, entities: List[str], number_of_weeks: int = 0, - from_date: date = None, to_date: date = None) -> Dict: - weekly_report_data = {} - number_of_days = number_of_weeks*7 - if number_of_days > 0: - # Separate weeks in range taking a number of weeks ago, considering the week ended yesterday - date_range = pd.date_range(end=date.today() - timedelta(days=1), periods=number_of_days) - start_dates = date_range[0::7] - end_dates = date_range[6::7] - week_span = list(zip(start_dates, end_dates)) - elif isinstance(from_date, date) and isinstance(to_date, date): - # Separate weeks in range considering the week starts on Monday - date_range = pd.date_range(start=from_date, end=to_date) - week_span = list(parse_date_range(date_range)) - else: - week_span = [] - for start_date, end_date in week_span: - weekly_report_data[ - f"{start_date.strftime('%Y-%m-%d')} {end_date.strftime('%Y-%m-%d')}" - ] = cls.get_daily_report(entities, start_date, end_date) - return weekly_report_data - - @classmethod - def get_weekly_report(cls, entities: List[str], number_of_weeks: int = 0, - from_date: date = None, to_date: date = None) -> Dict: - weekly_report_data = cls.generate_weekly_report_data(entities, number_of_weeks, from_date, to_date) - report = {"Weeks": []} - weekly_headers = cls.report_headers() - for header in weekly_headers: - report[header] = [] - for week, week_data in weekly_report_data.items(): - report["Weeks"].append(week) - for header in weekly_headers: - report[header].append(sum(week_data[header])) - return report - - @classmethod - def get_trend_live_values(cls, live_report_paths: Iterator[str]) -> Iterator[int]: - raise NotImplementedError - - @classmethod - def calculate_trend_value(cls, trend_values: Iterator[int]) -> float: - x = np.arange(0, len(trend_values)) - y = np.array(trend_values) - z = np.polyfit(x, y, 1) - return round(z[0], 2) - - @classmethod - def get_live_report(cls, entities): - base_directory = cls.get_entity_base_directory() - report = {} - live_headers = cls.live_csv_headers if cls.live_csv_headers else cls.csv_headers - live_headers = [h for h in live_headers if h not in cls.ignored_headers] - for index, header in enumerate(live_headers): - report[header] = copy.deepcopy(cls.csv_default_values[index]) - times = [] - live_report_paths = [] - for entity in entities: - entity_directory = os.path.join(base_directory, entity) - reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder) - file_path = os.path.join(reports_directory, "live.csv") - if not os.path.exists(file_path): - continue - live_report_paths.append(file_path) - with open(file_path, "r") as live_file: - lastest_entry = deque(csv.DictReader(live_file), 1)[0] - times.append(datetime.strptime(lastest_entry["Time"], "%Y-%m-%d %H:%M:%S")) - for header in live_headers: - if lastest_entry[header][0].isdigit(): - report[header] += int(ast.literal_eval(lastest_entry[header])) - else: # It's a list - entry = ast.literal_eval(lastest_entry[header]) - if is_list_recursively_empty(report[header]): - report[header] = entry - else: - report[header] = [a + b for a, b in zip(report[header], entry)] - report["Time"] = "" - report["Trend"] = 0 - if times: - report["Time"] = str(min(times)) - trend_live_values = cls.get_trend_live_values(live_report_paths) - if trend_live_values: - report["Trend"] = cls.calculate_trend_value(trend_live_values) - return report - - @classmethod - def can_execute(cls, config, entity): - return True - - @classmethod - def get_report_date(cls): - if 
datetime.now().hour == 0: - # Pending to process the latest hour from yesterday - return date.today() - timedelta(days=1) - else: - return date.today() diff --git a/libs/metrics/dwell_time.py b/libs/metrics/dwell_time.py deleted file mode 100644 index 73b7c0bc..00000000 --- a/libs/metrics/dwell_time.py +++ /dev/null @@ -1,217 +0,0 @@ -import csv -import ast -import numpy as np -import os -from statistics import mean - -from collections import deque -from datetime import datetime, date -from typing import Dict, List, Iterator, Tuple - -from .base import BaseMetric, AggregationMode -from constants import DWELL_TIME - -DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - - -class DwellTimeMetric(BaseMetric): - - reports_folder = DWELL_TIME - csv_headers = ["DetectedObjects", "L1", "L2", "L3", "L4", "L5", "AvgDwellTime", "MaxDwellTime", "Active"] - ignored_headers = ["Active"] - csv_default_values = [0, 0, 0, 0, 0, 0, 0, 0, "{}"] - aggregation_mode = AggregationMode.SINGLE - L1_THRESHOLD = 10 - L2_THRESHOLD = 30 - L3_THRESHOLD = 60 - L4_THRESHOLD = 180 - L5_THRESHOLD = 300 - ACTIVE_TRACK_INTERVAL = 10 - - @classmethod - def process_metric_csv_row(cls, csv_row: Dict, objects_logs: Dict): - row_time = datetime.strptime(csv_row["Timestamp"], "%Y-%m-%d %H:%M:%S") - detections = ast.literal_eval(csv_row["Detections"]) - row_hour = row_time.hour - if not objects_logs.get(row_hour): - objects_logs[row_hour] = {} - for index, d in enumerate(detections): - if not objects_logs[row_hour].get(d["tracking_id"]): - objects_logs[row_hour][d["tracking_id"]] = {"times": []} - objects_logs[row_hour][d["tracking_id"]]["times"].append( - { - "time": row_time - } - ) - objects_logs[row_hour]["latest_time"] = row_time - - @classmethod - def generate_hourly_metric_data(cls, config, objects_logs, entity=None): - reports_directory = os.path.join(entity.base_directory, "reports", cls.reports_folder) - daily_csv = os.path.join(reports_directory, "report_" + str(cls.get_report_date()) + ".csv") - latest_active_ids = _read_estimated_latest_active_ids(daily_csv) - return cls.calculate_metrics(objects_logs, latest_active_ids) - - @classmethod - def calculate_metrics(cls, objects_logs, latest_active_ids): - summary = np.zeros((len(objects_logs), 6), dtype=np.long) - result = [] - for index, hour in enumerate(sorted(objects_logs)): - hour_objects_detections = objects_logs[hour] - if "latest_time" not in hour_objects_detections: - result.append(cls.csv_default_values) - continue - latest_time = objects_logs[hour]["latest_time"] - max_s = 0 - total_s = 0 - active_after_hour = {} - previous_active_counted = set() - for track_id, detection_object in hour_objects_detections.items(): - if track_id == "latest_time": - continue - times = detection_object["times"] - if track_id in latest_active_ids: - start = datetime.strptime(latest_active_ids[track_id]["start"], DATE_FORMAT) - previous_active_counted.add(track_id) - else: - start = times[0]["time"] - end = times[-1]["time"] - dwell_seconds = (end - start).seconds - if dwell_seconds < 3: - # You don't count. 
You are invisible to us - continue - # If the detection was seen in the last ACTIVE_TRACK_INTERVAL seconds then - # we don't count it and only count it during the next hour - if (latest_time - end).seconds < cls.ACTIVE_TRACK_INTERVAL: - active_after_hour[track_id] = {"start": start.strftime(DATE_FORMAT), "time": dwell_seconds} - continue - - if dwell_seconds > max_s: - max_s = dwell_seconds - total_s += dwell_seconds - summary[index] += cls.get_level_result(dwell_seconds) - - # Count missing from previous hour - for track_id in latest_active_ids.keys(): - if track_id not in previous_active_counted: - dwell_seconds = latest_active_ids[track_id]["time"] - if dwell_seconds > max_s: - max_s = dwell_seconds - total_s += dwell_seconds - summary[index] += cls.get_level_result(dwell_seconds) - - hour_result = list(summary[index]) - # Avg - if summary[index][0] == 0: - hour_result.append(0) - else: - hour_result.append(round(total_s / summary[index][0], 2)) - # Max - hour_result.append(max_s) - # Active - hour_result.append(active_after_hour) - result.append(hour_result) - - latest_active_ids = active_after_hour - - return result - - @classmethod - def get_level_result(cls, dwell_seconds: int) -> Tuple[int, int, int, int, int, int]: - if dwell_seconds > cls.L5_THRESHOLD: - return 1, 0, 0, 0, 0, 1 - elif dwell_seconds > cls.L4_THRESHOLD: - return 1, 0, 0, 0, 1, 0 - elif dwell_seconds > cls.L3_THRESHOLD: - return 1, 0, 0, 1, 0, 0 - elif dwell_seconds > cls.L2_THRESHOLD: - return 1, 0, 1, 0, 0, 0 - elif dwell_seconds > cls.L1_THRESHOLD: - return 1, 1, 0, 0, 0, 0 - else: - return 1, 0, 0, 0, 0, 0 - - @classmethod - def generate_daily_csv_data(cls, yesterday_hourly_file): - detected_objects, l1, l2, l3, l4, l5 = 0, 0, 0, 0, 0, 0 - with open(yesterday_hourly_file, newline='') as csvfile: - reader = csv.DictReader(csvfile) - avg = [] - max_d = 0 - for row in reader: - detected_objects += int(row["DetectedObjects"]) - l1 += int(row["L1"]) - l2 += int(row["L2"]) - l3 += int(row["L3"]) - l4 += int(row["L4"]) - l5 += int(row["L5"]) - avg.append(float(row["AvgDwellTime"])) - max_d = max(max_d, int(row["MaxDwellTime"])) - return detected_objects, l1, l2, l3, l4, l5, round(mean(avg), 2), max_d, "{}" - - @classmethod - def generate_live_csv_data(cls, config, today_entity_csv, entity, entries_in_interval): - """ - Generates the live report using the `today_entity_csv` file received. - """ - roi_contour = cls.get_roi_contour_for_entity(config, entity["id"]) - live_csv = os.path.join(entity.base_directory, "reports", cls.reports_folder, "live.csv") - latest_active_ids = _read_estimated_latest_active_ids(live_csv) - with open(today_entity_csv, "r") as log: - objects_logs = {} - lastest_entries = deque(csv.DictReader(log), entries_in_interval) - for entry in lastest_entries: - cls.process_csv_row(entry, objects_logs, roi_contour) - metric_data = cls.calculate_metrics(objects_logs, latest_active_ids) - numerics = np.zeros(6, dtype=np.long) - avg = 0. 
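The L1-L5 buckets produced by get_level_result above follow a "first threshold exceeded wins" rule over the class constants (10/30/60/180/300 seconds). A table-driven sketch of the same mapping, written from scratch for illustration rather than lifted from the deleted implementation:

    # (threshold in seconds, index of the bucket it fills in the result tuple)
    THRESHOLDS = [(300, 5), (180, 4), (60, 3), (30, 2), (10, 1)]

    def level_counts(dwell_seconds):
        # Result layout: (DetectedObjects, L1, L2, L3, L4, L5)
        counts = [1, 0, 0, 0, 0, 0]
        for threshold, bucket in THRESHOLDS:
            if dwell_seconds > threshold:
                counts[bucket] = 1
                break
        return tuple(counts)

    assert level_counts(45) == (1, 0, 1, 0, 0, 0)   # L2: over 30s, at most 60s
    assert level_counts(400) == (1, 0, 0, 0, 0, 1)  # L5: over 300s
    assert level_counts(5) == (1, 0, 0, 0, 0, 0)    # detected, below every level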
- total = 0 - max_l = 0 - for h in metric_data: - numerics += h[:6] - avg += h[0] * h[6] - total += h[0] - max_l = max(max_l, h[7]) - avg = avg / total - result = list(numerics) - result.extend([avg, max_l, metric_data[-1][8]]) - return result - - @classmethod - def get_trend_live_values(cls, live_report_paths: Iterator[str]) -> Iterator[int]: - latest_dwell_time_results = {} - for n in range(10): - latest_dwell_time_results[n] = None - for live_path in live_report_paths: - with open(live_path, "r") as live_file: - lastest_10_entries = deque(csv.DictReader(live_file), 10) - for index, item in enumerate(lastest_10_entries): - if not latest_dwell_time_results[index]: - latest_dwell_time_results[index] = 0.0 - latest_dwell_time_results[index] += float(item["AvgDwellTime"]) - return [item for item in latest_dwell_time_results.values() if item is not None] - - @classmethod - def get_weekly_report(cls, entities: List[str], number_of_weeks: int = 0, - from_date: date = None, to_date: date = None) -> Dict: - # The occupancy metrics can not be aggregated using "sum" - weekly_report_data = cls.generate_weekly_report_data(entities, number_of_weeks, from_date, to_date) - report = {"Weeks": []} - for header in cls.csv_headers: - report[header] = [] - for week, week_data in weekly_report_data.items(): - report["Weeks"].append(week) - report["AvgDwellTime"].append(round(mean(week_data["AvgDwellTime"]), 2)) - report["MaxDwellTime"].append(max(week_data["MaxDwellTime"])) - for header in ["DetectedObjects", "L1", "L2", "L3", "L4", "L5"]: - report[header].append(sum(week_data[header])) - return report - - -def _read_estimated_latest_active_ids(file_path): - if os.path.exists(file_path): - with open(file_path, "r") as dwell_file: - latest_entry = deque(csv.DictReader(dwell_file), 1) - if len(latest_entry) != 0 and latest_entry[0]["Active"] != "": - return ast.literal_eval(latest_entry[0]["Active"]) - return {} diff --git a/libs/metrics/face_mask_usage.py b/libs/metrics/face_mask_usage.py deleted file mode 100644 index 9cef1d59..00000000 --- a/libs/metrics/face_mask_usage.py +++ /dev/null @@ -1,136 +0,0 @@ - -import ast -import csv -import numpy as np - -from collections import deque -from datetime import datetime -from typing import Dict, List, Iterator, Tuple - -from .base import BaseMetric, AggregationMode - - -class FaceMaskUsageMetric(BaseMetric): - - reports_folder = "face-mask-usage" - csv_headers = ["NoFace", "FaceWithMask", "FaceWithoutMask"] - csv_default_values = [0, 0, 0] - aggregationMode = AggregationMode.BATCH - - @classmethod - def process_metric_csv_row(cls, csv_row: Dict, objects_logs: Dict): - row_time = datetime.strptime(csv_row["Timestamp"], "%Y-%m-%d %H:%M:%S") - detections = ast.literal_eval(csv_row["Detections"]) - row_hour = row_time.hour - if not objects_logs.get(row_hour): - objects_logs[row_hour] = {} - for d in detections: - if not objects_logs[row_hour].get(d["tracking_id"]): - objects_logs[row_hour][d["tracking_id"]] = {"face_labels": []} - # Append social distancing violations - objects_logs[row_hour][d["tracking_id"]]["face_labels"].append(d.get("face_label", -1)) - - @classmethod - def generate_hourly_metric_data(cls, config, objects_logs, entity=None): - summary = np.zeros((len(objects_logs), 3), dtype=np.long) - for index, hour in enumerate(sorted(objects_logs)): - hour_objects_detections = objects_logs[hour] - for detection_object in hour_objects_detections.values(): - no_face_detections, mask_detections, no_mask_detections = cls.process_face_labels_for_object( - 
detection_object["face_labels"] - ) - summary[index] += (no_face_detections, mask_detections, no_mask_detections) - return summary - - @classmethod - def process_face_labels_for_object(cls, face_labels: List[int]) -> Tuple[int, int]: - """ - Receives a list with the "facesmask detections" (for a single person) and returns a - tuple with the summary of faces and mask detected. Consecutive detections in the same state are - grouped and returned as a single one. Detections lower than the constant PROCESSING_COUNT_THRESHOLD - are ignored. - - For example, the input [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1 1, 1, - -1, -1, -1, -1, -1, -1, 0, 0, 0, 0] returns (3, 2). - """ - no_face_detections = 0 - mask_detections = 0 - no_mask_detections = 0 - if cls.aggregationMode == AggregationMode.SINGLE: - if len(face_labels) < 2: - return 0, 0, 0 - for face_label in face_labels: - if face_label == -1: - no_face_detections += 1 - elif face_label == 0: - mask_detections += 1 - else: - no_mask_detections += 1 - weight_factor = 5 - if no_face_detections > weight_factor * mask_detections and no_face_detections > weight_factor * no_mask_detections: - return 1, 0, 0 - elif mask_detections > no_mask_detections: - return 0, 1, 0 - else: - return 0, 0, 1 - else: - current_status = None - processing_status = None - processing_count = 0 - for face_label in face_labels: - if processing_status != face_label: - processing_status = face_label - processing_count = 0 - processing_count += 1 - if current_status != processing_status and processing_count >= cls.processing_count_threshold: - # FaceLabel was enouth time in the same state, change it - current_status = processing_status - if current_status == -1: - # Face was not detected - no_face_detections += 1 - elif current_status == 0: - # A face using mask was detected - mask_detections += 1 - else: - # current_status == 1 - # A face without mask was detected - no_mask_detections += 1 - return no_face_detections, mask_detections, no_mask_detections - - @classmethod - def generate_daily_csv_data(cls, yesterday_hourly_file): - total_no_face_detections, total_mask_detections, total_no_mask_detections = 0, 0, 0 - with open(yesterday_hourly_file, newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - total_no_face_detections += int(row["NoFace"]) - total_mask_detections += int(row["FaceWithMask"]) - total_no_mask_detections += int(row["FaceWithoutMask"]) - return total_no_face_detections, total_mask_detections, total_no_mask_detections - - @classmethod - def generate_live_csv_data(cls, config, today_entity_csv, entity, entries_in_interval): - """ - Generates the live report using the `today_entity_csv` file received. 
- """ - roi_contour = cls.get_roi_contour_for_entity(config, entity["id"]) - with open(today_entity_csv, "r") as log: - objects_logs = {} - lastest_entries = deque(csv.DictReader(log), entries_in_interval) - for entry in lastest_entries: - cls.process_csv_row(entry, objects_logs, roi_contour) - return np.sum(cls.generate_hourly_metric_data(config, objects_logs), axis=0) - - @classmethod - def get_trend_live_values(cls, live_report_paths: Iterator[str]) -> Iterator[int]: - latest_facemask_results = {} - for n in range(10): - latest_facemask_results[n] = None - for live_path in live_report_paths: - with open(live_path, "r") as live_file: - lastest_10_entries = deque(csv.DictReader(live_file), 10) - for index, item in enumerate(lastest_10_entries): - if not latest_facemask_results[index]: - latest_facemask_results[index] = 0 - latest_facemask_results[index] += int(item["FaceWithMask"]) - return [item for item in latest_facemask_results.values() if item is not None] diff --git a/libs/metrics/in_out.py b/libs/metrics/in_out.py deleted file mode 100644 index 89e5850d..00000000 --- a/libs/metrics/in_out.py +++ /dev/null @@ -1,302 +0,0 @@ -import ast -import csv -import json -import os -import numpy as np -import copy - -from collections import deque -from datetime import date -from typing import Dict, Iterator, List -from datetime import datetime -from statistics import mean - -from .base import BaseMetric -from constants import IN_OUT -from libs.utils.config import get_source_config_directory -from libs.utils.utils import validate_file_exists_and_is_not_empty, is_list_recursively_empty -from libs.utils.in_out import check_line_cross - - -class InOutMetric(BaseMetric): - - reports_folder = IN_OUT - csv_headers = ["In", "Out", "EstimatedMaxOccupancy", "EstimatedAverageOccupancy", "EstimatedLatestOccupancy", "Summary"] - csv_default_values = [0, 0, 0, 0, 0, [[], [], []]] - NUMBER_OF_PATH_SEGMENTS = 7 - SEGMENTATION_MINUTES = 10 - - @classmethod - def process_metric_csv_row(cls, csv_row: Dict, objects_logs: Dict): - row_time = datetime.strptime(csv_row["Timestamp"], "%Y-%m-%d %H:%M:%S") - detections = ast.literal_eval(csv_row["Detections"]) - row_hour, row_minute = row_time.hour, row_time.minute - intervals_per_hour = 60 // cls.SEGMENTATION_MINUTES - segment = row_minute // cls.SEGMENTATION_MINUTES - if not objects_logs.get(row_hour): - objects_logs[row_hour] = {key: {} for key in range(intervals_per_hour)} - for d in detections: - if not objects_logs[row_hour][segment].get(d["tracking_id"]): - objects_logs[row_hour][segment][d["tracking_id"]] = {"path": []} - # Append bottom middle positions - corners = d["bbox_real"] - x1, x2 = int(corners[0]), int(corners[2]) - _, y2 = int(corners[1]), int(corners[3]) - bottom_middle_position = (x1 + (x2 - x1) / 2, y2) - objects_logs[row_hour][segment][d["tracking_id"]]["path"].append(bottom_middle_position) - - @classmethod - def generate_daily_csv_data(cls, yesterday_hourly_file): - people_in, people_out = 0, 0 - estimated_max_occupancy, estimated_average_occupancy, boundary_names = [], [], [] - estimated_latest_occupancy = _read_estimated_latest_occupancy(yesterday_hourly_file) - with open(yesterday_hourly_file, newline="") as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - people_in += int(row["In"]) - people_out += int(row["Out"]) - estimated_max_occupancy.append(int(row["EstimatedMaxOccupancy"])) - estimated_average_occupancy.append(float(row["EstimatedAverageOccupancy"])) - - if not is_list_recursively_empty(row["Summary"]): - 
hourly_boundary_names, hourly_in, hourly_out = ast.literal_eval(row["Summary"]) - hourly_in, hourly_out = np.array(hourly_in, dtype=int), np.array(hourly_out, dtype=int) - if not boundary_names: - boundary_names = hourly_boundary_names - daily_in = np.zeros(len(boundary_names), dtype=int) - daily_out = np.zeros(len(boundary_names), dtype=int) - daily_in += hourly_in - daily_out += hourly_out - estimated_max_occupancy = max(estimated_max_occupancy) - estimated_average_occupancy = round(mean(estimated_average_occupancy), 2) - summary = [boundary_names, list(daily_in), list(daily_out)] - return people_in, people_out, estimated_max_occupancy, estimated_average_occupancy, estimated_latest_occupancy, summary - - @classmethod - def generate_hourly_metric_data(cls, config, objects_logs, entity): - boundaries = cls.retrieve_in_out_boundaries(config, entity["id"]) - boundary_names = [boundary["name"] for boundary in boundaries] - hourly_summary = [0, 0, 0, 0, 0, [boundary_names, [0] * len(boundaries), [0] * len(boundaries)]] - summary = [copy.deepcopy(hourly_summary) for x in range(len(objects_logs))] - reports_directory = os.path.join(entity.base_directory, "reports", cls.reports_folder) - daily_csv = os.path.join(reports_directory, "report_" + str(cls.get_report_date()) + ".csv") - latest_estimated_occupancy = _read_estimated_latest_occupancy(daily_csv) - - for index_hour, hour in enumerate(sorted(objects_logs)): - hour_in, hour_out, hour_balance = [], [], [] - cls._process_hourly_segments( - objects_logs[hour], latest_estimated_occupancy, boundaries, - hour_in, hour_out, hour_balance, summary[index_hour][5] - ) - if not hour_balance: - hour_balance = [0] - summary[index_hour][0] = sum(hour_in) - summary[index_hour][1] = sum(hour_out) - summary[index_hour][2] = max(0, max(hour_balance)) # estimated_max_occupancy - summary[index_hour][3] = max(0, round(mean(hour_balance), 2)) # estimated_average_occupancy - summary[index_hour][4] = max(0, hour_balance[-1]) # estimated_latest_occupancy - return summary - - @classmethod - def generate_live_csv_data(cls, config, today_entity_csv, entity, entries_in_interval): - """ - Generates the live report using the `today_entity_csv` file received. 
- """ - boundaries = cls.retrieve_in_out_boundaries(config, entity["id"]) - boundary_names = [boundary["name"] for boundary in boundaries] - roi_contour = cls.get_roi_contour_for_entity(config, entity["id"]) - - live_csv = os.path.join(entity.base_directory, "reports", cls.reports_folder, "live.csv") - latest_estimated_occupancy = _read_estimated_latest_occupancy(live_csv) - summary = [0, 0, 0, 0, 0, [boundary_names, [0] * len(boundaries), [0] * len(boundaries)]] - with open(today_entity_csv, "r") as log: - objects_logs = {} - lastest_entries = deque(csv.DictReader(log), entries_in_interval) - for entry in lastest_entries: - cls.process_csv_row(entry, objects_logs, roi_contour) - - hour_in, hour_out, hour_balance = [], [], [] - for hour in sorted(objects_logs): - cls._process_hourly_segments( - objects_logs[hour], latest_estimated_occupancy, boundaries, - hour_in, hour_out, hour_balance, summary[5] - ) - summary[0] = sum(hour_in) - summary[1] = sum(hour_out) - summary[2] = max(0, max(hour_balance)) # estimated_max_occupancy - summary[3] = max(0, round(mean(hour_balance), 2)) # estimated_average_occupancy - summary[4] = max(0, hour_balance[-1]) # estimated_latest_occupancy - return summary - - @classmethod - def get_trend_live_values(cls, live_report_paths: Iterator[str]) -> Iterator[int]: - latest_in_out_results = {} - for n in range(10): - latest_in_out_results[n] = None - for live_path in live_report_paths: - with open(live_path, "r") as live_file: - lastest_10_entries = deque(csv.DictReader(live_file), 10) - for index, item in enumerate(lastest_10_entries): - if not latest_in_out_results[index]: - latest_in_out_results[index] = 0 - latest_in_out_results[index] += int(item["In"]) + int(item["Out"]) - return [item for item in latest_in_out_results.values() if item is not None] - - @classmethod - def get_in_out_file_path(cls, camera_id, config): - """ Returns the path to the roi_contour file """ - return f"{get_source_config_directory(config)}/{camera_id}/{IN_OUT}/{IN_OUT}.json" - - @classmethod - def retrieve_in_out_boundaries(cls, config, camera_id): - boundary_path = cls.get_in_out_file_path(camera_id, config) - boundary_line = cls.read_in_out_boundaries(boundary_path) - if boundary_line is None: - raise Exception(f"Camera {camera_id} does not have a defined in/out boundary") - else: - return boundary_line["in_out_boundaries"] - - @classmethod - def read_in_out_boundaries(cls, in_out_file_path): - """ Given the path to the in-out file it loads it and returns it """ - if validate_file_exists_and_is_not_empty(in_out_file_path): - with open(in_out_file_path) as json_file: - in_out_boundaries = json.load(json_file) - return in_out_boundaries - else: - return None - - @classmethod - def can_execute(cls, config, entity): - boundary_line = cls.read_in_out_boundaries(cls.get_in_out_file_path(entity["id"], config)) - if boundary_line is None: - return False - return True - - @classmethod - def get_weekly_report(cls, entities: List[str], number_of_weeks: int = 0, - from_date: date = None, to_date: date = None) -> Dict: - # The In/Out metric cannot be fully aggregated using "sum" - weekly_report_data = cls.generate_weekly_report_data(entities, number_of_weeks, from_date, to_date) - report = { - "Weeks": [], - "InMax": [], - "OutMax": [], - "InAvg": [], - "OutAvg": [], - } - for header in cls.csv_headers: - report[header] = [] - for week, week_data in weekly_report_data.items(): - estimated_max_occ = max(week_data["EstimatedMaxOccupancy"]) if week_data["EstimatedMaxOccupancy"] else 0 - 
estimated_avg_occ = round(mean(week_data["EstimatedAverageOccupancy"]), 2) if week_data["EstimatedAverageOccupancy"] else 0 - estimated_latest_occ = round(week_data["EstimatedLatestOccupancy"][-1]) if week_data["EstimatedLatestOccupancy"] else 0 - in_sum = sum(week_data["In"]) - out_sum = sum(week_data["Out"]) - in_max = max(week_data["In"]) if week_data["In"] else 0 - out_max = max(week_data["Out"]) if week_data["Out"] else 0 - in_avg = round(mean(week_data["In"]), 2) if week_data["In"] else 0 - out_avg = round(mean(week_data["Out"]), 2) if week_data["Out"] else 0 - report["Weeks"].append(week) - report["In"].append(in_sum) - report["Out"].append(out_sum) - report["InMax"].append(in_max) - report["OutMax"].append(out_max) - report["InAvg"].append(in_avg) - report["OutAvg"].append(out_avg) - report["EstimatedMaxOccupancy"].append(estimated_max_occ) - report["EstimatedAverageOccupancy"].append(estimated_avg_occ) - report["EstimatedLatestOccupancy"].append(estimated_latest_occ) - if is_list_recursively_empty(week_data["Summary"]): - boundary_name = [] - weekly_in = [] - weekly_out = [] - else: - boundary_names, weekly_in, weekly_out = list(zip(*week_data["Summary"])) - boundary_name = next(x for x in boundary_names if not is_list_recursively_empty(x)) - weekly_in = _fill_partially_empty_result(weekly_in, 0) - weekly_out = _fill_partially_empty_result(weekly_out, 0) - - report["Summary"].append([ - boundary_name, - [sum(x) for x in zip(*weekly_in)], - [sum(x) for x in zip(*weekly_out)] - ]) - return report - - @classmethod - def _process_path(cls, boundary_line, trajectory_path, number_of_cuts=NUMBER_OF_PATH_SEGMENTS): - """ - Verify if a trajectory goes over a boundary line - Args: - Two coordinates [x,y] are in 2-tuples [A,B] - Boundaries of the in/out line. - If someone crosses the line while having A to their right, they are going in the in direction (entering) - Crossing the line while having A to their left means they are going in the out direction (leaving) - - trajectory_path: List of N 2-tuples (x,y) - That represents the trajectory of an object. - - Returns: - (in, out) : tuple - (1, 1) - if the object entered and left an equal number of times. - (1, 0) - if the object entered (in) - (0, 1) - if the object left (out) - (0, 0) - if the object didn't cross the boundary. 
- """ - if len(trajectory_path) < number_of_cuts: - number_of_cuts = len(trajectory_path) - - trajectory_steps = [trajectory_path[int(i)] for i in np.linspace(0, len(trajectory_path) - 1, number_of_cuts)] - trajectory_steps = zip(trajectory_steps, trajectory_steps[1:]) - total_in, total_out = 0, 0 - - for trajectory in trajectory_steps: - path_in, path_out = check_line_cross(boundary_line, trajectory) - total_in += path_in - total_out += path_out - - # Normalize in_out: - return (int(total_in >= total_out and total_in > 0), int(total_out >= total_in and total_out > 0)) - - @classmethod - def _process_hourly_segments( - cls, hourly_objects_logs, latest_estimated_occupancy, boundaries, # input - hour_in, hour_out, hour_balance, summary_report # output - ): - for index_segment, segment in enumerate(hourly_objects_logs): - segment_objects_detections = hourly_objects_logs[segment] - segment_in, segment_out = 0, 0 - for track_id, data in segment_objects_detections.items(): - path = data["path"] - for index_boundary, boundary in enumerate(boundaries): - boundary_line = boundary["in_out_boundary"] - new_in, new_out = cls._process_path(boundary_line, path) - segment_in += new_in - segment_out += new_out - summary_report[1][index_boundary] += new_in - summary_report[2][index_boundary] += new_out - latest_estimated_occupancy += (segment_in - segment_out) - hour_in.append(segment_in) - hour_out.append(segment_out) - hour_balance.append(latest_estimated_occupancy) - - -def _fill_partially_empty_result(tuple_of_lists, default_value): - tuple_of_lists = list(tuple_of_lists) - length_of_sublists = len(next(x for x in tuple_of_lists if not is_list_recursively_empty(x))) - for i in range(len(tuple_of_lists)): - if is_list_recursively_empty(tuple_of_lists[i]): - tuple_of_lists[i] = [default_value] * length_of_sublists - return tuple_of_lists - - -def _read_estimated_latest_occupancy(in_out_file_path): - def _is_today(entry): - return datetime.strptime(entry["Time"], "%Y-%m-%d %H:%M:%S").date() == datetime.today().date() - if os.path.exists(in_out_file_path): - with open(in_out_file_path, "r") as in_out_file: - latest_entry = deque(csv.DictReader(in_out_file), 1) - if len(latest_entry) != 0 and ("Time" not in latest_entry[0] or _is_today(latest_entry[0])): - return int(latest_entry[0]["EstimatedLatestOccupancy"]) - return 0 diff --git a/libs/metrics/social_distancing.py b/libs/metrics/social_distancing.py deleted file mode 100644 index b5aaaff1..00000000 --- a/libs/metrics/social_distancing.py +++ /dev/null @@ -1,205 +0,0 @@ -import csv -import ast -import numpy as np -import os - -from collections import deque -from datetime import datetime, date, timedelta -from typing import Dict, List, Iterator, Tuple - -from libs.utils.loggers import get_source_log_directory - -from .base import BaseMetric, AggregationMode -from constants import SOCIAL_DISTANCING - - -class SocialDistancingMetric(BaseMetric): - - reports_folder = SOCIAL_DISTANCING - csv_headers = ["DetectedObjects", "NoInfringement", "LowInfringement", "HighInfringement", - "CriticalInfringement"] - csv_default_values = [0, 0, 0, 0, 0] - aggregation_mode = AggregationMode.BATCH - - @classmethod - def process_metric_csv_row(cls, csv_row: Dict, objects_logs: Dict): - row_time = datetime.strptime(csv_row["Timestamp"], "%Y-%m-%d %H:%M:%S") - detections = ast.literal_eval(csv_row["Detections"]) - row_hour = row_time.hour - if not objects_logs.get(row_hour): - objects_logs[row_hour] = {} - for index, d in enumerate(detections): - if not 
objects_logs[row_hour].get(d["tracking_id"]):
-                objects_logs[row_hour][d["tracking_id"]] = {"distance_violations": []}
-            # Append social distancing violations
-            objects_logs[row_hour][d["tracking_id"]]["distance_violations"].append(
-                {
-                    "time": row_time,
-                    "infringement": index in ast.literal_eval(csv_row["ViolationsIndexes"])
-                }
-            )
-
-    @classmethod
-    def generate_hourly_metric_data(cls, config, objects_logs, entity=None):
-        summary = np.zeros((len(objects_logs), 5), dtype=np.long)
-        for index, hour in enumerate(sorted(objects_logs)):
-            hour_objects_detections = objects_logs[hour]
-            for detection_object in hour_objects_detections.values():
-                summary[index] += cls.process_distance_violation_for_object(
-                    detection_object["distance_violations"])
-        return summary
-
-    @classmethod
-    def process_distance_violation_for_object(cls, distance_violations: List[dict]) -> Tuple[int, int, int, int, int]:
-        """
-        Receives a list with the "social distancing detections" (for a single person) and returns a
-        tuple with the summary of detections and violations (grouped by severity). Consecutive detections in
-        the same state are grouped and returned as a single one. Detection runs shorter than the constant
-        PROCESSING_COUNT_THRESHOLD are ignored.
-
-        The infringement categories are:
-            - Low: Between 10 and 30 seconds
-            - High: Between 30 and 60 seconds
-            - Critical: More than 60 seconds
-        """
-        # TODO: The category values defined need to be updated taking into account the WHO recommendations.
-        # The current values are only for demo purposes
-        current_status = None
-        processing_status = None
-        processing_count = 0
-        current_status_start_time = None
-        processing_start_time = None
-
-        CRITICAL_THRESHOLD = 60
-        HIGH_THRESHOLD = 30
-        LOW_THRESHOLD = 10
-
-        detections = []
-        if distance_violations:
-            for dist_violation in distance_violations:
-                status = dist_violation["infringement"]
-                if processing_status != status:
-                    processing_status = status
-                    processing_start_time = dist_violation["time"]
-                    processing_count = 0
-                processing_count += 1
-                if current_status != processing_status and processing_count >= cls.processing_count_threshold:
-                    # Object was enough time in the same state, change it
-                    if current_status is not None:
-                        # Append the previous (now closed) status to the detections list
-                        seconds_in_status = (processing_start_time - current_status_start_time).seconds
-                        detections.append({"status": current_status, "seconds": seconds_in_status})
-                    current_status = processing_status
-                    current_status_start_time = processing_start_time
-            if current_status:
-                # Append the latest status
-                seconds_in_status = (distance_violations[-1]["time"] - current_status_start_time).seconds
-                detections.append({"status": current_status, "seconds": seconds_in_status})
-        detected_objects, no_infringements, low_infringements, high_infringements, critical_infringements = 0, 0, 0, 0, 0
-        for detection in detections:
-            detected_objects += 1
-            if not detection["status"] or detection["seconds"] < LOW_THRESHOLD:
-                no_infringements += 1
-            elif detection["seconds"] < HIGH_THRESHOLD:
-                low_infringements += 1
-            elif detection["seconds"] < CRITICAL_THRESHOLD:
-                high_infringements += 1
-            else:
-                # CRITICAL_THRESHOLD <= detection["seconds"]
-                critical_infringements += 1
-        if cls.aggregation_mode == AggregationMode.SINGLE:
-            if critical_infringements > 0:
-                return 1, 0, 0, 0, 1
-            elif high_infringements > 0:
-                return 1, 0, 0, 1, 0
-            elif low_infringements > 0:
-                return 1, 0, 1, 0, 0
-            elif detected_objects > 0:
-                return 1, 1, 0, 0, 0
-            else:
-                return 0, 0, 0, 0, 0
-        else:
-            return 
detected_objects, no_infringements, low_infringements, high_infringements, critical_infringements - - @classmethod - def generate_daily_csv_data(cls, yesterday_hourly_file): - detected_objects, no_infringements, low_infringements, high_infringements, critical_infringements = 0, 0, 0, 0, 0 - with open(yesterday_hourly_file, newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - detected_objects += int(row["DetectedObjects"]) - no_infringements += int(row["NoInfringement"]) - low_infringements += int(row["LowInfringement"]) - high_infringements += int(row["HighInfringement"]) - critical_infringements += int(row["CriticalInfringement"]) - return detected_objects, no_infringements, low_infringements, high_infringements, critical_infringements - - @classmethod - def create_heatmap_report(cls, config, yesterday_csv, heatmap_file, column): - heatmap_resolution = config.get_section_dict("App")["HeatmapResolution"].split(",") - heatmap_x = int(heatmap_resolution[0]) - heatmap_y = int(heatmap_resolution[1]) - heatmap_grid = np.zeros((heatmap_x, heatmap_y)) - - with open(yesterday_csv, newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - detections = ast.literal_eval(row['Detections']) - if column == 'Violations': - violations_indexes = ast.literal_eval(row['ViolationsIndexes']) - # Get bounding boxes of violations - detections = [detections[object_id] for object_id in violations_indexes] - - for detection in detections: - bbox = detection.get('bbox') - x = int((np.floor((bbox[0] + bbox[2]) * heatmap_x / 2)).item()) - y = int((np.floor((bbox[1] + bbox[3]) * heatmap_y / 2)).item()) - heatmap_grid[x][y] += 1 / (1 + heatmap_grid[x][y]) - np.save(heatmap_file, heatmap_grid) - - @classmethod - def compute_daily_metrics(cls, config): - super().compute_daily_metrics(config) - base_directory = get_source_log_directory(config) - entities = config.get_video_sources() - for entity in entities: - entity_directory = os.path.join(base_directory, entity["id"]) - objects_log_directory = os.path.join(entity_directory, "objects_log") - heatmaps_directory = os.path.join(entity_directory, "heatmaps") - # Create missing directories - os.makedirs(objects_log_directory, exist_ok=True) - os.makedirs(heatmaps_directory, exist_ok=True) - yesterday = str(date.today() - timedelta(days=1)) - yesterday_csv = os.path.join(objects_log_directory, yesterday + ".csv") - if os.path.isfile(yesterday_csv): - detection_heatmap_file = os.path.join(heatmaps_directory, "detections_heatmap_" + yesterday) - violation_heatmap_file = os.path.join(heatmaps_directory, "violations_heatmap_" + yesterday) - cls.create_heatmap_report(config, yesterday_csv, detection_heatmap_file, "Detections") - cls.create_heatmap_report(config, yesterday_csv, violation_heatmap_file, "Violations") - - @classmethod - def generate_live_csv_data(cls, config, today_entity_csv, entity, entries_in_interval): - """ - Generates the live report using the `today_entity_csv` file received. 
- """ - roi_contour = cls.get_roi_contour_for_entity(config, entity["id"]) - with open(today_entity_csv, "r") as log: - objects_logs = {} - lastest_entries = deque(csv.DictReader(log), entries_in_interval) - for entry in lastest_entries: - cls.process_csv_row(entry, objects_logs, roi_contour) - return np.sum(cls.generate_hourly_metric_data(config, objects_logs), axis=0) - - @classmethod - def get_trend_live_values(cls, live_report_paths: Iterator[str]) -> Iterator[int]: - latest_social_distancing_results = {} - for n in range(10): - latest_social_distancing_results[n] = None - for live_path in live_report_paths: - with open(live_path, "r") as live_file: - lastest_10_entries = deque(csv.DictReader(live_file), 10) - for index, item in enumerate(lastest_10_entries): - if not latest_social_distancing_results[index]: - latest_social_distancing_results[index] = 0 - latest_social_distancing_results[index] += int(item["DetectedObjects"]) - int(item["NoInfringement"]) - return [item for item in latest_social_distancing_results.values() if item is not None] diff --git a/libs/metrics/utils.py b/libs/metrics/utils.py deleted file mode 100644 index 33ff5d13..00000000 --- a/libs/metrics/utils.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy as np -import os -import pandas as pd - -from .face_mask_usage import FaceMaskUsageMetric -from .social_distancing import SocialDistancingMetric -from .in_out import InOutMetric -from .dwell_time import DwellTimeMetric - - -def compute_hourly_metrics(config): - SocialDistancingMetric.compute_hourly_metrics(config) - FaceMaskUsageMetric.compute_hourly_metrics(config) - InOutMetric.compute_hourly_metrics(config) - DwellTimeMetric.compute_hourly_metrics(config) - - -def compute_daily_metrics(config): - SocialDistancingMetric.compute_daily_metrics(config) - FaceMaskUsageMetric.compute_daily_metrics(config) - InOutMetric.compute_daily_metrics(config) - DwellTimeMetric.compute_daily_metrics(config) - - -def compute_live_metrics(config, live_interval): - SocialDistancingMetric.compute_live_metrics(config, live_interval) - FaceMaskUsageMetric.compute_live_metrics(config, live_interval) - InOutMetric.compute_live_metrics(config, live_interval) - DwellTimeMetric.compute_live_metrics(config, live_interval) - - -def generate_heatmap(camera_id, from_date, to_date, report_type): - """Returns the sum of the heatmaps for a specified range of dates - Args: - camera_id (str): id of an existing camera - from_date (date): start of the date range - to_date (date): end of the date range - report_type (str): { 'violations', 'detections' } - - Returns: - result (dict): { - 'heatmap': [(150,150) grid], - 'not_found_dates': [array[str]] - } - """ - log_dir = os.getenv('SourceLogDirectory') - heatmap_resolution = os.getenv('HeatmapResolution').split(",") - heatmap_x = int(heatmap_resolution[0]) - heatmap_y = int(heatmap_resolution[1]) - file_path = os.path.join(log_dir, camera_id, "heatmaps", f"{report_type}_heatmap_") - - date_range = pd.date_range(start=from_date, end=to_date) - heatmap_total = np.zeros((heatmap_x, heatmap_y)) - not_found_dates = [] - - for report_date in date_range: - try: - heatmap = np.load(f"{file_path}{report_date.strftime('%Y-%m-%d')}.npy") - heatmap_total = np.add(heatmap_total, heatmap) - except IOError: - not_found_dates.append(report_date.strftime('%Y-%m-%d')) - - return {"heatmap": heatmap_total.tolist(), - "not_found_dates": not_found_dates} diff --git a/libs/reports/notifications.py b/libs/reports/notifications.py index dd170f36..e7a88167 100644 --- 
a/libs/reports/notifications.py +++ b/libs/reports/notifications.py @@ -7,7 +7,6 @@ from datetime import date, timedelta from libs.notifications.slack_notifications import SlackService, is_slack_configured -from libs.metrics import SocialDistancingMetric from libs.utils.mailing import MailService, is_mailing_configured from libs.utils.loggers import get_source_log_directory @@ -23,7 +22,7 @@ def get_daily_report(config, entity_info, report_date): if entity_type == 'Camera': reports_directory = os.path.join(log_directory, entity_info['id'], "reports") daily_csv_file_paths = [ - os.path.join(reports_directory, SocialDistancingMetric.reports_folder ,'report_' + report_date + '.csv') + os.path.join(reports_directory, '','report_' + report_date + '.csv') ] else: raise NotImplementedError diff --git a/run_periodic_task.py b/run_periodic_task.py index 1c848f93..0022c072 100644 --- a/run_periodic_task.py +++ b/run_periodic_task.py @@ -3,9 +3,8 @@ import schedule import time -from libs.backups.s3_backup import raw_data_backup, reports_backup +from libs.backups.s3_backup import raw_data_backup from libs.config_engine import ConfigEngine -from libs.metrics.utils import compute_hourly_metrics, compute_daily_metrics, compute_live_metrics from libs.reports.notifications import (send_daily_report_notification, send_daily_global_report, send_weekly_global_report) @@ -23,14 +22,7 @@ def main(config): if not config.get_boolean(p_task, "Enabled"): continue task_name = config.get_section_dict(p_task).get("Name") - if task_name == "metrics": - logger.info("Metrics enabled!") - schedule.every().day.at("00:01").do(compute_daily_metrics, config=config) - schedule.every().hour.at(":01").do(compute_hourly_metrics, config=config) - live_interval = int(config.get_section_dict(p_task).get("LiveInterval", 10)) - schedule.every(live_interval).minutes.do( - compute_live_metrics, config=config, live_interval=live_interval) - elif task_name == "s3_backup": + if task_name == "s3_backup": bucket_name = config.get_section_dict(p_task).get("BackupS3Bucket") if not bucket_name: logger.info("S3 Backup task doesn't have a bucket configured.") @@ -38,7 +30,6 @@ def main(config): logger.info("Backup enabled!") backup_interval = int(config.get_section_dict(p_task).get("BackupInterval", 30)) schedule.every(backup_interval).minutes.do(raw_data_backup, config=config, bucket_name=bucket_name) - schedule.every().day.at("00:30").do(reports_backup, config=config, bucket_name=bucket_name) else: raise ValueError(f"Not supported periodic task named: {task_name}") From 33d110c2a32bd5b529f94477af1ef8ff84a2e2f2 Mon Sep 17 00:00:00 2001 From: Pablo Grill Date: Thu, 29 Jul 2021 18:00:22 -0300 Subject: [PATCH 3/7] Remove notification and reports tasks from processor. 
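With the metrics, report, and notification jobs removed, the scheduler in run_periodic_task.py (see the hunk above) keeps only the schedule-based s3_backup branch. A minimal sketch of that remaining loop; the task body, bucket name, and 30-minute interval are placeholders:

    import time

    import schedule

    def raw_data_backup(bucket_name):
        # Placeholder body; the real task uploads raw data to S3.
        print(f"backing up raw data to {bucket_name}")

    schedule.every(30).minutes.do(raw_data_backup, bucket_name="example-bucket")

    while True:  # run_periodic_task.py drives pending jobs in a loop like this
        schedule.run_pending()
        time.sleep(10)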
--- api/models/app.py | 6 - api/models/base.py | 9 - api/models/camera.py | 4 +- api/models/config.py | 17 - api/models/metrics.py | 385 ------------------ api/processor_api.py | 2 - api/routers/config.py | 26 +- api/routers/slack.py | 88 ---- api/tests/app/test_app.py | 12 - api/tests/app/test_camera.py | 34 -- api/tests/app/test_config.py | 117 ------ api/tests/app/test_ml_models.py | 6 - api/tests/data/config-x86-openvino_EMPTY.ini | 7 - .../data/config-x86-openvino_JUST_CAMERAS.ini | 19 - .../data/config-x86-openvino_METRICS.ini | 36 -- api/tests/utils/common_functions.py | 6 - api/tests/utils/example_models.py | 30 -- config-coral.ini | 12 - config-jetson-nano.ini | 13 - config-jetson-tx2.ini | 13 - config-x86-gpu-tensorrt.ini | 14 - config-x86-gpu.ini | 12 - config-x86-openvino.ini | 13 - config-x86.ini | 16 +- libs/config_engine.py | 24 +- libs/entities/base_entity.py | 12 +- libs/entities/video_source.py | 12 +- libs/notifications/__init__.py | 0 libs/notifications/slack_notifications.py | 77 ---- libs/processor_core.py | 18 - libs/reports/__init__.py | 0 libs/reports/notifications.py | 83 ---- libs/utils/_global_entity_report.html | 18 - libs/utils/mail_daily_report.html | 83 ---- libs/utils/mail_global_report.html | 74 ---- libs/utils/mail_occupancy_notification.html | 65 --- libs/utils/mail_violations_notification.html | 61 --- libs/utils/mailing.py | 107 ----- libs/utils/notifications.py | 51 --- run_periodic_task.py | 17 - 40 files changed, 8 insertions(+), 1591 deletions(-) delete mode 100644 api/models/metrics.py delete mode 100644 api/routers/slack.py delete mode 100644 libs/notifications/__init__.py delete mode 100644 libs/notifications/slack_notifications.py delete mode 100644 libs/reports/__init__.py delete mode 100644 libs/reports/notifications.py delete mode 100644 libs/utils/_global_entity_report.html delete mode 100644 libs/utils/mail_daily_report.html delete mode 100644 libs/utils/mail_global_report.html delete mode 100644 libs/utils/mail_occupancy_notification.html delete mode 100644 libs/utils/mail_violations_notification.html delete mode 100644 libs/utils/mailing.py delete mode 100644 libs/utils/notifications.py diff --git a/api/models/app.py b/api/models/app.py index 8fe4ffd4..ab48d1df 100644 --- a/api/models/app.py +++ b/api/models/app.py @@ -11,13 +11,7 @@ class AppDTO(SnakeModel): maxProcesses: int = Field(1) dashboardURL: str = Field("http://0.0.0.0:8000") dashboardAuthorizationToken: str = Field("", example="token") - slackChannel: Optional[str] = Field("", example="lanthorn-notifications") - occupancyAlertsMinInterval: int = Field(0, example=180) maxThreadRestarts: int = Field(5) - globalReportingEmails: Optional[str] = Field("", example="email@email,email2@email") - globalReportTime: str = Field("06:00") - dailyGlobalReport: bool = Field(False) - weeklyGlobalReport: bool = Field(False) heatmapResolution = Field("150,150") logPerformanceMetrics: bool = Field(False) logPerformanceMetricsDirectory: str = Field("", example="/repo/data/processor/static/data/performace-metrics") diff --git a/api/models/base.py b/api/models/base.py index 72910a23..4f420195 100644 --- a/api/models/base.py +++ b/api/models/base.py @@ -18,15 +18,6 @@ class EntityConfigDTO(SnakeModel): name: str = Field(example='Kitchen') -class NotificationConfig(SnakeModel): - violationThreshold: Optional[int] = Field(0, example=100) - notifyEveryMinutes: Optional[int] = Field(0, example=15) - emails: Optional[str] = Field("", example='john@email.com,doe@email.com') - enableSlackNotifications: 
Optional[bool] = Field(False, example=False) - dailyReport: Optional[bool] = Field(False, example=True) - dailyReportTime: Optional[str] = Field('06:00', example='06:00') - - class OptionalSectionConfig(SnakeModel): name: str = Field(example="objects_filtering") enabled: bool diff --git a/api/models/camera.py b/api/models/camera.py index 276071b4..05e221bf 100644 --- a/api/models/camera.py +++ b/api/models/camera.py @@ -4,10 +4,10 @@ from pydantic import BaseModel, Field, validator from typing import List, Optional, Tuple -from .base import EntityConfigDTO, NotificationConfig, SnakeModel +from .base import EntityConfigDTO, SnakeModel -class CameraDTO(EntityConfigDTO, NotificationConfig): +class CameraDTO(EntityConfigDTO): videoPath: str = Field(example='/repo/data/softbio_vid.mp4') tags: Optional[str] = Field("", example='kitchen,living_room') image: Optional[str] = Field("", example='Base64 image') diff --git a/api/models/config.py b/api/models/config.py index 6fd2d631..b1e22f99 100644 --- a/api/models/config.py +++ b/api/models/config.py @@ -29,23 +29,6 @@ class ConfigDTO(SnakeModel): periodicTasks: Optional[List[PeriodicTaskDTO]] = [] -class GlobalReportingEmailsInfo(BaseModel): - emails: Optional[str] = Field("", example='john@email.com,doe@email.com') - time: Optional[str] = Field("06:00") - daily: Optional[bool] = Field(False, example=True) - weekly: Optional[bool] = Field(False, example=True) - - class Config: - schema_extra = { - "example": { - "emails": "john@email.com,doe@email.com", - "time": "06:00", - "daily": True, - "weekly": True - } - } - - class ConfigInfo(BaseModel): version: str device: str diff --git a/api/models/metrics.py b/api/models/metrics.py deleted file mode 100644 index aafdede6..00000000 --- a/api/models/metrics.py +++ /dev/null @@ -1,385 +0,0 @@ -from typing import List, Tuple - -from .base import SnakeModel - - -class HeatmapReport(SnakeModel): - heatmap: List[List[float]] - not_found_dates: List[str] - - class Config: - schema_extra = { - "example": [{ - "heatmap": "[[0.0,3.0,1.0,2.0,...],[3.0,1.34234,5.2342342,...],...]", - "not_found_dates": [ - "2020-08-14", - "2020-08-15", - "2020-08-16" - ] - }] - } - - -class HourlyReports(SnakeModel): - Hours: List[int] - - -class DailyReport(SnakeModel): - Dates: List[str] - - -class WeeklyReport(SnakeModel): - Weeks: List[str] - - -class LiveReport(SnakeModel): - Time: str - Trend: float - - -class SocialDistancing(SnakeModel): - DetectedObjects: List[int] - NoInfringement: List[int] - LowInfringement: List[int] - HighInfringement: List[int] - CriticalInfringement: List[int] - - -class SocialDistancingLive(LiveReport): - DetectedObjects: int - NoInfringement: int - LowInfringement: int - HighInfringement: int - CriticalInfringement: int - - -class SocialDistancingHourly(HourlyReports, SocialDistancing): - class Config: - schema_extra = { - "example": [{ - "hours": list(range(0, 23)), - "detected_objects": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0], - "no_infringement": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "low_infringement": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0., 7273, 10011, 0, 0, 7273, 10011, 0], - "high_infringement": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "critical_infringement": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 
6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0] - }] - } - - -class SocialDistancingDaily(DailyReport, SocialDistancing): - class Config: - schema_extra = { - "example": [{ - "dates": ["2020-08-15", "2020-08-16", "2020-08-17", "2020-08-18"], - "detected_objects": [0, 7273, 10011, 0], - "no_infringement": [0, 4920, 6701, 0], - "low_infringement": [0, 7273, 10011, 0], - "high_infringement": [0, 4920, 6701, 0], - "critical_infringement": [0, 4920, 6701, 0], - }] - } - - -class SocialDistancingWeekly(WeeklyReport, SocialDistancing): - class Config: - schema_extra = { - "example": [{ - "weeks": ["2020-07-03 2020-07-05", "2020-07-06 2020-07-12", "2020-07-13 2020-07-19", "2020-07-20 2020-07-26"], - "detected_objects": [0, 7273, 10011, 0], - "no_infringement": [0, 4920, 6701, 0], - "low_infringement": [0, 7273, 10011, 0], - "high_infringement": [0, 4920, 6701, 0], - "critical_infringement": [0, 4920, 6701, 0], - }] - } - - -class DwellTime(SnakeModel): - DetectedObjects: List[int] - AvgDwellTime: List[float] - MaxDwellTime: List[int] - L1: List[int] - L2: List[int] - L3: List[int] - L4: List[int] - L5: List[int] - - -class DwellTimeLive(LiveReport): - DetectedObjects: int - AvgDwellTime: float - MaxDwellTime: int - L1: int - L2: int - L3: int - L4: int - L5: int - - -class DwellTimeHourly(HourlyReports, DwellTime): - class Config: - schema_extra = { - "example": [{ - "hours": list(range(0, 23)), - "detected_objects": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0], - "l1": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "l2": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0., 7273, 10011, 0, 0, 7273, 10011, 0], - "l3": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "l4": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "l5": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "avg_dwell_time": [0.0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0.0, - 0.0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0.0], - "max_dwell_time": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - }] - } - - -class DwellTimeDaily(DailyReport, DwellTime): - class Config: - schema_extra = { - "example": [{ - "dates": ["2020-08-15", "2020-08-16", "2020-08-17", "2020-08-18"], - "detected_objects": [0, 7273, 10011, 0], - "l1": [0, 4920, 6701, 0], - "l2": [0, 7273, 10011, 0], - "l3": [0, 4920, 6701, 0], - "l4": [0, 4920, 6701, 0], - "l5": [0, 4920, 6701, 0], - "avg_dwell_time": [0.0, 4920, 6701, 0], - "max_dwell_time": [0, 4920, 6701, 0], - }] - } - - -class DwellTimeWeekly(WeeklyReport, DwellTime): - class Config: - schema_extra = { - "example": [{ - "weeks": ["2020-07-03 2020-07-05", "2020-07-06 2020-07-12", "2020-07-13 2020-07-19", "2020-07-20 2020-07-26"], - "l1": [0, 4920, 6701, 0], - "l2": [0, 7273, 10011, 0], - "l3": [0, 4920, 6701, 0], - "l4": [0, 4920, 6701, 0], - "l5": [0, 4920, 6701, 0], - "avg_dwell_time": [0.0, 4920, 6701, 0], - "max_dwell_time": [0, 4920, 6701, 0], - }] - } - - -class FaceMask(SnakeModel): - NoFace: List[int] - FaceWithMask: List[int] - FaceWithoutMask: List[int] - - -class FaceMaskLive(LiveReport): - NoFace: int - FaceWithMask: int - FaceWithoutMask: int - - -class 
FaceMaskHourly(HourlyReports, FaceMask): - class Config: - schema_extra = { - "example": [{ - "hours": list(range(0, 23)), - "no_face": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0], - "face_with_mask": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0], - "face_without_mask": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0., 7273, 10011, 0, 0, 7273, 10011, 0], - }] - } - - -class FaceMaskDaily(DailyReport, FaceMask): - class Config: - schema_extra = { - "example": [{ - "dates": ["2020-08-15", "2020-08-16", "2020-08-17", "2020-08-18"], - "no_face": [0, 7273, 10011, 0], - "face_without_mask": [0, 4920, 6701, 0], - "face_with_mask": [0, 7273, 10011, 0], - }] - } - - -class FaceMaskWeekly(WeeklyReport, FaceMask): - class Config: - schema_extra = { - "example": [{ - "weeks": ["2020-07-03 2020-07-05", "2020-07-06 2020-07-12", "2020-07-13 2020-07-19", "2020-07-20 2020-07-26"], - "no_face": [0, 7273, 10011, 0], - "face_without_mask": [0, 4920, 6701, 0], - "face_with_mask": [0, 7273, 10011, 0], - }] - } - - -class Occupancy(SnakeModel): - OccupancyThreshold: List[int] - AverageOccupancy: List[float] - MaxOccupancy: List[float] - - -class OccupancyLive(LiveReport): - AverageOccupancy: int - MaxOccupancy: int - OccupancyThreshold: int - Violations: int - - -class OccupancyHourly(HourlyReports, Occupancy): - class Config: - schema_extra = { - "example": [{ - "hours": list(range(0, 23)), - "average_occupancy": [0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0, - 0, 7273, 10011, 0, 0, 7273, 10011, 0, 0, 7273, 10011, 0], - "max_occupancy": [0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0, - 0, 4920, 6701, 0, 0, 4920, 6701, 0, 0, 4920, 6701, 0] - }] - } - - -class OccupancyDaily(DailyReport, Occupancy): - class Config: - schema_extra = { - "example": [{ - "dates": ["2020-08-15", "2020-08-16", "2020-08-17", "2020-08-18"], - "average_occupancy": [0, 7273, 10011, 0], - "max_occupancy": [0, 4920, 6701, 0], - }] - } - - -class OccupancyWeekly(WeeklyReport, Occupancy): - class Config: - schema_extra = { - "example": [{ - "weeks": ["2020-07-03 2020-07-05", "2020-07-06 2020-07-12", "2020-07-13 2020-07-19", "2020-07-20 2020-07-26"], - "average_occupancy": [0, 7273, 10011, 0], - "max_occupancy": [0, 4920, 6701, 0], - }] - } - - -class InOut(SnakeModel): - In: List[int] - Out: List[int] - EstimatedMaxOccupancy: List[int] - EstimatedAverageOccupancy: List[float] - EstimatedLatestOccupancy: List[int] - Summary: List[Tuple[List[str], List[int], List[int]]] - - -class InOutLive(LiveReport): - In: int - Out: int - EstimatedMaxOccupancy: int - EstimatedAverageOccupancy: float - EstimatedLatestOccupancy: int - Summary: Tuple[List[str], List[int], List[int]] - - -class InOutHourly(HourlyReports, InOut): - class Config: - schema_extra = { - "example": [{ - "hours": list(range(0, 23)), - "in": [0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 0, 7, 0, 3, 3, 0, 0, 0, 2, 1, 0, 0, 0, 0], - "out": [0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 2, 1, 2, 5, 3, 3, 2, 2, 1, 0, 0, 0, 0], - "estimated_max_occupancy": [0, 0, 0, 0, 0, 1, 2, 4, 5, 7, 7, 11, 10, 11, 7, 5, 2, 2, 1, 0, 0, 0, 0, 0], - "estimated_average_occupancy": [0, 0, 0, 0, 0, 0.5, 1.2, 3, 4.2, 7, 6, 8.4, 5, 5, 3, 2, 2, 1, 0.5, 0, 0, 0, 0, 0], - "estimated_latest_occupancy": [0, 0, 0, 0, 0, 1, 1, 3, 3, 6, 5, 10, 9, 10, 8, 5, 2, 0, 0, 0, 0, 0, 0, 0], - "summary": [ - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 
'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 1], [0, 0]], - [['Left Door', 'Right Door'], [1, 0], [1, 0]], - [['Left Door', 'Right Door'], [0, 2], [0, 0]], - [['Left Door', 'Right Door'], [2, 0], [0, 2]], - [['Left Door', 'Right Door'], [0, 3], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [1, 0]], - [['Left Door', 'Right Door'], [4, 3], [1, 1]], - [['Left Door', 'Right Door'], [0, 0], [0, 1]], - [['Left Door', 'Right Door'], [0, 3], [2, 0]], - [['Left Door', 'Right Door'], [1, 2], [4, 1]], - [['Left Door', 'Right Door'], [0, 0], [3, 0]], - [['Left Door', 'Right Door'], [0, 0], [3, 0]], - [['Left Door', 'Right Door'], [0, 0], [1, 1]], - [['Left Door', 'Right Door'], [0, 0], [0, 2]], - [['Left Door', 'Right Door'], [0, 0], [1, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]] - ] - }] - } - - -class InOutDaily(DailyReport, InOut): - class Config: - schema_extra = { - "example": [{ - "dates": ["2020-08-15", "2020-08-16", "2020-08-17", "2020-08-18"], - "in": [4, 23, 50, 0], - "out": [4, 23, 50, 0], - "estimated_max_occupancy": [4, 23, 50, 0], - "estimated_average_occupancy": [3, 19.5, 40, 0], - "estimated_latest_occupancy": [0, 0, 0, 0], - "summary": [ - [['Left Door', 'Right Door'], [2, 2], [3, 1]], - [['Left Door', 'Right Door'], [12, 11], [12, 11]], - [['Left Door', 'Right Door'], [42, 8], [8, 42]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]] - ] - }] - } - - -class InOutWeekly(WeeklyReport, InOut): - InMax: List[int] - OutMax: List[int] - InAvg: List[int] - OutAvg: List[int] - class Config: - schema_extra = { - "example": [{ - "weeks": ["2020-07-03 2020-07-05", "2020-07-06 2020-07-12", "2020-07-13 2020-07-19", "2020-07-20 2020-07-26"], - "in": [40, 420, 300, 0], - "out": [40, 420, 300, 0], - "in_max": [4, 23, 50, 0], - "out_max": [4, 23, 50, 0], - "in_avg": [4, 23, 50, 0], - "out_avg": [4, 23, 50, 0], - "estimated_max_occupancy": [40, 420, 300, 0], - "estimated_average_occupancy": [27.9, 376, 285.4, 0], - "estimated_latest_occupancy": [0, 0, 0, 0], - "summary": [ - [['Left Door', 'Right Door'], [20, 20], [30, 10]], - [['Left Door', 'Right Door'], [300, 120], [300, 120]], - [['Left Door', 'Right Door'], [150, 150], [150, 150]], - [['Left Door', 'Right Door'], [0, 0], [0, 0]] - ] - }] - } diff --git a/api/processor_api.py b/api/processor_api.py index 9e3e7b4e..c07ce7d2 100644 --- a/api/processor_api.py +++ b/api/processor_api.py @@ -23,7 +23,6 @@ from .routers.detector import detector_router from .routers.export import export_router from .routers.periodic_tasks import periodic_tasks_router -from .routers.slack import slack_router from .routers.source_loggers import source_loggers_router from .routers.source_post_processors import source_post_processors_router from .routers.static import static_router @@ -80,7 +79,6 @@ def create_fastapi_app(self): app.include_router(source_loggers_router, prefix="/source_loggers", tags=["Source Loggers"], dependencies=dependencies) app.include_router(periodic_tasks_router, prefix="/periodic_tasks", tags=["Periodic Tasks"], dependencies=dependencies) app.include_router(export_router, prefix="/export", tags=["Export"], dependencies=dependencies) - app.include_router(slack_router, prefix="/slack", tags=["Slack"], dependencies=dependencies) 
app.include_router(auth_router, prefix="/auth", tags=["Auth"]) app.include_router(static_router, prefix="/static", dependencies=dependencies) app.include_router(ml_model_router, prefix="/ml_model", tags=["ML Models"], dependencies=dependencies) diff --git a/api/routers/config.py b/api/routers/config.py index 8ff266fc..981712d9 100644 --- a/api/routers/config.py +++ b/api/routers/config.py @@ -3,7 +3,7 @@ from fastapi import APIRouter from typing import Optional -from api.models.config import ConfigDTO, ConfigInfo, GlobalReportingEmailsInfo +from api.models.config import ConfigDTO, ConfigInfo from api.utils import ( get_config, extract_config, handle_response, update_config, map_section_from_config, map_to_config_file_format ) @@ -106,27 +106,3 @@ async def get_processor_info(): Returns basic info regarding this processor """ return processor_info(get_config()) - - -@config_router.get("/global_report", response_model=GlobalReportingEmailsInfo) -async def get_report_info(): - app_config = extract_config()["App"] - return { - "emails": app_config["GlobalReportingEmails"], - "time": app_config["GlobalReportTime"], - "daily": get_config().get_boolean("App", "DailyGlobalReport"), - "weekly": get_config().get_boolean("App", "WeeklyGlobalReport") - } - - -@config_router.put("/global_report") -async def update_report_info(global_report_info: GlobalReportingEmailsInfo, reboot_processor: Optional[bool] = True): - global_report_info = global_report_info.dict(exclude_unset=True, exclude_none=True) - config_dict = extract_config() - key_mapping = {"GlobalReportingEmails": "emails", "GlobalReportTime": "time", - "DailyGlobalReport": "daily", "WeeklyGlobalReport": "weekly"} - for key, value in key_mapping.items(): - if value in global_report_info: - config_dict["App"][key] = str(global_report_info[value]) - success = update_config(config_dict, reboot_processor) - return handle_response(config_dict, success) diff --git a/api/routers/slack.py b/api/routers/slack.py deleted file mode 100644 index e0b5fbbd..00000000 --- a/api/routers/slack.py +++ /dev/null @@ -1,88 +0,0 @@ -import logging - -from fastapi import APIRouter, status -from pydantic import BaseModel -from typing import Optional - - -from api.utils import handle_response, update_config -from libs.notifications.slack_notifications import is_slack_configured - -logger = logging.getLogger(__name__) - -slack_router = APIRouter() - - -class SlackConfig(BaseModel): - user_token: str - channel: Optional[str] - - class Config: - schema_extra = { - "example": { - "user_token": "xxxx-ffff..." 
- } - } - - -class SlackIsEnabled(BaseModel): - enabled: bool - - -def add_slack_channel_to_config(channel, reboot_processor): - logger.info("Adding slack's channel on processor's config") - config_dict = dict() - config_dict["App"] = dict({"SlackChannel": channel}) - - success = update_config(config_dict, reboot_processor) - return handle_response(config_dict, success) - - -def write_user_token(token): - logger.info("Writing user access token") - with open("slack_token.txt", "w+") as slack_token: - slack_token.write(token) - - -def enable_slack(token_config, reboot_processor): - write_user_token(token_config.user_token) - logger.info("Enabling slack notification on processor's config") - config_dict = dict() - config_dict["App"] = dict({"SlackChannel": token_config.channel}) - success = update_config(config_dict, reboot_processor) - - return handle_response(config_dict, success) - - -@slack_router.get("/is-enabled", response_model=SlackIsEnabled) -def is_slack_enabled(): - """ - Returns if slack is already enabled in the processor - """ - return { - "enabled": is_slack_configured() - } - - -@slack_router.delete("/revoke", status_code=status.HTTP_204_NO_CONTENT) -def revoke_slack(): - """ - Remove the current slack configuration in the processor - """ - write_user_token("") - - -@slack_router.post("/add-channel", status_code=status.HTTP_204_NO_CONTENT) -def add_slack_channel(channel: str, reboot_processor: Optional[bool] = True): - """ - Changes the slack's channel used by the processor to send notifications - """ - add_slack_channel_to_config(channel, reboot_processor) - - -@slack_router.post("/enable", status_code=status.HTTP_204_NO_CONTENT) -def enable(body: SlackConfig, reboot_processor: Optional[bool] = True): - """ - Changes the slack workspace configured in the processor - """ - enable_slack(body, reboot_processor) diff --git a/api/tests/app/test_app.py b/api/tests/app/test_app.py index 977c618f..94b7119a 100644 --- a/api/tests/app/test_app.py +++ b/api/tests/app/test_app.py @@ -57,20 +57,8 @@ def test_change_app_config_properly(self, config_rollback, app_config): ({"max_processes": False}, "integer"), ({"dashboardurl": 40}, "string"), ({"dashboardurl": False}, "string"), - ({"slack_channel": 40}, "string"), - ({"slack_channel": False}, "string"), - ({"occupancy_alerts_min_interval": False}, "integer"), - ({"occupancy_alerts_min_interval": "Here_must_be_an_integer_variable"}, "integer"), ({"max_thread_restarts": False}, "integer"), ({"max_thread_restarts": "Here_must_be_an_integer_variable"}, "integer"), - ({"global_reporting_emails": 40}, "string"), - ({"global_reporting_emails": False}, "string"), - ({"global_report_time": 40}, "string"), - ({"global_report_time": False}, "string"), - ({"daily_global_report": "Here_must_be_a_bool_variable"}, "bool"), - ({"daily_global_report": 40}, "bool"), - ({"weekly_global_report": "Here_must_be_a_bool_variable"}, "bool"), - ({"weekly_global_report": 40}, "bool"), ({"heatmap_resolution": 40}, "string"), ({"heatmap_resolution": False}, "string"), ({"entity_config_directory": False}, "string"), diff --git a/api/tests/app/test_camera.py b/api/tests/app/test_camera.py index e3f50155..e5d95768 100644 --- a/api/tests/app/test_camera.py +++ b/api/tests/app/test_camera.py @@ -173,10 +173,6 @@ def test_edit_a_camera_properly(self, config_rollback, camera_sample, rollback_c body = { "violation_threshold": 22, "notify_every_minutes": 22, - "emails": "new_john@email.com,new_doe@email.com", - "enable_slack_notifications": True, - "daily_report": False, - 
"daily_report_time": "11:22", "id": camera_id, "name": "new_Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -199,12 +195,6 @@ def test_try_edit_a_camera_non_existent_id(self, config_rollback, camera_sample, camera_id = "Non-existent ID" body = { - "violation_threshold": 22, - "notify_every_minutes": 22, - "emails": "new_john@email.com,new_doe@email.com", - "enable_slack_notifications": True, - "daily_report": False, - "daily_report_time": "11:22", "id": camera_id, "name": "new_Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -225,12 +215,6 @@ def test_try_edit_camera_wrong_video_path(self, config_rollback, camera_sample, camera_id = camera_sample["id"] body = { - "violation_threshold": 22, - "notify_every_minutes": 22, - "emails": "new_john@email.com,new_doe@email.com", - "enable_slack_notifications": True, - "daily_report": False, - "daily_report_time": "11:22", "id": camera_id, "name": "new_Kitchen", "video_path": "WRONG_PATH", @@ -252,12 +236,6 @@ def test_edit_same_camera_twice(self, config_rollback, camera_sample, rollback_c camera_id = camera_sample["id"] body_1 = { - "violation_threshold": 22, - "notify_every_minutes": 22, - "emails": "new_john@email.com,new_doe@email.com", - "enable_slack_notifications": True, - "daily_report": False, - "daily_report_time": "11:22", "id": camera_id, "name": "new_Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -268,12 +246,6 @@ def test_edit_same_camera_twice(self, config_rollback, camera_sample, rollback_c } body_2 = { - "violation_threshold": 33, - "notify_every_minutes": 33, - "emails": "new_new_john@email.com,new_new_doe@email.com", - "enable_slack_notifications": False, - "daily_report": False, - "daily_report_time": "10:33", "id": camera_id, "name": "new_new_Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -320,12 +292,6 @@ def test_edit_camera_empty_string_fields(self, config_rollback, camera_sample, r # Video path is correctly setted body = { - "violation_threshold": 33, - "notify_every_minutes": 33, - "emails": "", - "enable_slack_notifications": False, - "daily_report": False, - "daily_report_time": "", "id": camera_id, "name": "", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", diff --git a/api/tests/app/test_config.py b/api/tests/app/test_config.py index e5029538..4a76e777 100644 --- a/api/tests/app/test_config.py +++ b/api/tests/app/test_config.py @@ -69,29 +69,6 @@ def test_get_config_file(self, config_rollback): assert response.status_code == 200 - -# pytest -v api/tests/app/test_config.py::TestsGetReportInfo -class TestsGetReportInfo: - """Get Report Info, GET /config/global_report""" - - def test_get_global_report(self, config_rollback): - client, config_sample_path = config_rollback - - response = client.get("/config/global_report") - - config = get_config_file_json(config_sample_path) - app_config = config["app"] - expected_response = { - "emails": app_config["global_reporting_emails"], - "time": app_config["global_report_time"], - "daily": app_config["daily_global_report"], - "weekly": app_config["weekly_global_report"] - } - - assert response.status_code == 200 - assert response.json() == expected_response - - # pytest -v api/tests/app/test_config.py::TestsUpdateConfigFile class TestsUpdateConfigFile: """Get Report Info, PUT /config""" @@ -127,97 +104,3 @@ def test_try_update_config_file_bad_request_II(self, config_rollback): assert response.status_code == 400 assert 
response.json()["detail"][0]["type"] == "type_error.dict" - - -# pytest -v api/tests/app/test_config.py::TestsUpdateReportInfo -class TestsUpdateReportInfo: - """Update Report Info, PUT /config/global_report""" - - def test_update_report_info_properly(self, config_rollback): - client, config_sample_path = config_rollback - - body = { - "emails": "john@email.com,doe@email.com", - "time": "0:00", - "daily": True, - "weekly": True - } - - response = client.put("/config/global_report", json=body) - - expected_response = expected_response_update_report_info(config_sample_path) - - assert response.status_code == 200 - assert response.json() == expected_response - assert expected_response["app"]["global_reporting_emails"] == "john@email.com,doe@email.com" - assert expected_response["app"]["global_report_time"] == "0:00" - assert expected_response["app"]["daily_global_report"] == "True" - assert expected_response["app"]["weekly_global_report"] == "True" - - def test_try_update_report_info_invalid_keys(self, config_rollback): - """Here, as no valid key was sent, PUT request was processed with the example values from models/config - GlobalReportingEmailsInfo """ - client, config_sample_path = config_rollback - - body = { - "invalid_1": "example_1", - "invalid_2": "example_2", - "invalid_3": "example_3" - } - - response = client.put("/config/global_report", json=body) - - expected_response = expected_response_update_report_info(config_sample_path) - - assert response.status_code == 200 - assert response.json() == expected_response - - def test_try_update_report_info_empty_request_body(self, config_rollback): - """Here, as no valid key was sended, PUT request was finished with the example values from models/config - GlobalReportingEmailsInfo """ - client, config_sample_path = config_rollback - - body = {} - - response = client.put("/config/global_report", json=body) - - expected_response = expected_response_update_report_info(config_sample_path) - - assert response.status_code == 200 - assert response.json() == expected_response - - def test_try_update_report_info_wrong_variable_type_I(self, config_rollback): - client, config_sample_path = config_rollback - - body = { - "emails": "string", - "time": "string", - "daily": 40, - "weekly": 40 - } - - response = client.put("/config/global_report", json=body) - - assert response.status_code == 400 - assert response.json()["detail"][0]["type"] == "type_error.bool" - - def test_try_update_report_info_wrong_variable_type_II(self, config_rollback): - client, config_sample_path = config_rollback - - body = { - "emails": 40, # Here, a string should be sent - "time": True, # Here, a string should be sent - "daily": False, - "weekly": True - } - - response = client.put("/config/global_report", json=body) - - expected_response = expected_response_update_report_info(config_sample_path) - - assert response.status_code == 200 - assert response.json() == expected_response - assert expected_response["app"]["global_reporting_emails"] == "40" - assert expected_response["app"]["global_report_time"] == "True" - assert expected_response["app"]["daily_global_report"] == "False" - assert expected_response["app"]["weekly_global_report"] == "True" diff --git a/api/tests/app/test_ml_models.py b/api/tests/app/test_ml_models.py index debc05d8..e544998b 100644 --- a/api/tests/app/test_ml_models.py +++ b/api/tests/app/test_ml_models.py @@ -355,12 +355,6 @@ def test_try_change_ml_model_create_a_camera_recently(self, config_rollback_came # Create the camera camera_template = { - 
"violation_threshold": 100, - "notify_every_minutes": 15, - "emails": "john@email.com,doe@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "06:00", "id": "200", "name": "Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", diff --git a/api/tests/data/config-x86-openvino_EMPTY.ini b/api/tests/data/config-x86-openvino_EMPTY.ini index 6b8e5962..ea33fbb4 100644 --- a/api/tests/data/config-x86-openvino_EMPTY.ini +++ b/api/tests/data/config-x86-openvino_EMPTY.ini @@ -20,14 +20,7 @@ MaxProcesses = 2 ;Encoder: videoconvert ! vaapih264enc DashboardURL = http://0.0.0.0:8000 DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/performace-metrics diff --git a/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini b/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini index 2981e3be..72f3af39 100644 --- a/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini +++ b/api/tests/data/config-x86-openvino_JUST_CAMERAS.ini @@ -20,14 +20,7 @@ MaxProcesses = 2 ;Encoder: videoconvert ! vaapih264enc DashboardURL = http://0.0.0.0:8000 DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/performace-metrics @@ -104,12 +97,6 @@ BackupInterval = 30 BackupS3Bucket = your-s3-bucket [Source_1] -ViolationThreshold = 100 -NotifyEveryMinutes = 15 -Emails = john@email.com,doe@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 06:00 Id = 49 Name = Kitchen VideoPath = /repo/api/tests/data/mocked_data/data/softbio_vid.mp4 @@ -118,12 +105,6 @@ DistMethod = CenterPointsDistance LiveFeedEnabled = False [Source_2] -ViolationThreshold = 90 -NotifyEveryMinutes = 14 -Emails = nicolas@email.com,cage@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 05:40 Id = 50 Name = Kitchen VideoPath = /repo/api/tests/data/mocked_data/data/softbio_vid.mp4 diff --git a/api/tests/data/config-x86-openvino_METRICS.ini b/api/tests/data/config-x86-openvino_METRICS.ini index 7fc49c98..44a565bf 100644 --- a/api/tests/data/config-x86-openvino_METRICS.ini +++ b/api/tests/data/config-x86-openvino_METRICS.ini @@ -18,13 +18,7 @@ Encoder = videoconvert ! video/x-raw,format=I420 ! 
x264enc speed-preset=ultrafas MaxProcesses = 2 DashboardURL = http://0.0.0.0:8000 DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/api/tests/data/mocked_data/data/processor/static/data/performace-metrics @@ -35,13 +29,7 @@ VideoPath = /repo/api/tests/data/mocked_data/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] @@ -104,12 +92,6 @@ BackupInterval = 30 BackupS3Bucket = your-s3-bucket [Source_1] -ViolationThreshold = 100 -NotifyEveryMinutes = 15 -Emails = john@email.com,doe@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 06:00 Id = 49 Name = Kitchen VideoPath = /repo/data/softbio_vid.mp4 @@ -118,12 +100,6 @@ DistMethod = CenterPointsDistance LiveFeedEnabled = False [Source_2] -ViolationThreshold = 90 -NotifyEveryMinutes = 14 -Emails = nicolas@email.com,cage@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 05:40 Id = 50 Name = Kitchen VideoPath = /repo/data/softbio_vid.mp4 @@ -132,12 +108,6 @@ DistMethod = CenterPointsDistance LiveFeedEnabled = False [Source_3] -ViolationThreshold = 95 -NotifyEveryMinutes = 12 -Emails = fer@email.com,nando@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 04:40 Id = 51 Name = Bedroom VideoPath = /repo/data/softbio_vid.mp4 @@ -146,12 +116,6 @@ DistMethod = CenterPointsDistance LiveFeedEnabled = False [Source_4] -ViolationThreshold = 95 -NotifyEveryMinutes = 12 -Emails = john@email.com,cena@email.com -EnableSlackNotifications = False -DailyReport = True -DailyReportTime = 03:15 Id = 52 Name = Bedroom VideoPath = /repo/data/softbio_vid.mp4 diff --git a/api/tests/utils/common_functions.py b/api/tests/utils/common_functions.py index 880b5eae..ff6d9b61 100644 --- a/api/tests/utils/common_functions.py +++ b/api/tests/utils/common_functions.py @@ -108,13 +108,7 @@ def create_app_config(key_value_dict=None): "encoder": "string", "max_processes": 0, "dashboardurl": "string", - "slack_channel": "lanthorn-notifications", - "occupancy_alerts_min_interval": 180, "max_thread_restarts": 0, - "global_reporting_emails": "email@email,email2@email", - "global_report_time": "string", - "daily_global_report": False, - "weekly_global_report": False, "log_performance_metrics": False, "log_performance_metrics_directory": "/repo/data/processor/static/data/performace-metrics", "entity_config_directory": "/repo/data/processor/static/data/config", diff --git a/api/tests/utils/example_models.py b/api/tests/utils/example_models.py index e43ca366..541af4f6 100644 --- a/api/tests/utils/example_models.py +++ b/api/tests/utils/example_models.py @@ -1,10 +1,4 @@ camera_template = { - "violation_threshold": 100, - "notify_every_minutes": 15, - "emails": "john@email.com,doe@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "06:00", "id": "20", "name": "Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -15,12 +9,6 @@ camera_example = { - "violation_threshold": 100, - "notify_every_minutes": 15, - "emails": "john@email.com,doe@email.com", - 
"enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "06:00", "id": "49", "name": "Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -30,12 +18,6 @@ } camera_example_2 = { - "violation_threshold": 90, - "notify_every_minutes": 14, - "emails": "nicolas@email.com,cage@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "05:40", "id": "50", "name": "Kitchen", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -45,12 +27,6 @@ } camera_example_3 = { - "violation_threshold": 95, - "notify_every_minutes": 12, - "emails": "fer@email.com,nando@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "04:40", "id": "51", "name": "Bedroom", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", @@ -61,12 +37,6 @@ camera_example_4 = { - "violation_threshold": 95, - "notify_every_minutes": 12, - "emails": "john@email.com,cena@email.com", - "enable_slack_notifications": False, - "daily_report": True, - "daily_report_time": "03:15", "id": "52", "name": "Bedroom", "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4", diff --git a/config-coral.ini b/config-coral.ini index dbd52219..42ec9a77 100644 --- a/config-coral.ini +++ b/config-coral.ini @@ -5,14 +5,7 @@ Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafas MaxProcesses = 1 DashboardURL = https://app.lanthorn.ai/ DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -36,9 +29,6 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. @@ -46,8 +36,6 @@ ViolationThreshold = 60 ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. 
; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/config-jetson-nano.ini b/config-jetson-nano.ini index b4172432..35701792 100644 --- a/config-jetson-nano.ini +++ b/config-jetson-nano.ini @@ -8,14 +8,7 @@ MaxProcesses = 1 ; attn: deepstream has nvvideoconvert which should be used with deepstream pipelines DashboardURL = https://app.lanthorn.ai/ DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -39,18 +32,12 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. ; - CenterPointsDistance: compare center of pedestrian boxes together ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. ; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/config-jetson-tx2.ini b/config-jetson-tx2.ini index 973dfea2..bd50cf78 100644 --- a/config-jetson-tx2.ini +++ b/config-jetson-tx2.ini @@ -8,14 +8,7 @@ MaxProcesses = 1 ; attn: deepstream has nvvideoconvert which should be used with deepstream pipelines DashboardURL = https://app.lanthorn.ai/ DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -39,18 +32,12 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. ; - CenterPointsDistance: compare center of pedestrian boxes together ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. ; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/config-x86-gpu-tensorrt.ini b/config-x86-gpu-tensorrt.ini index 77afba79..bca96e22 100644 --- a/config-x86-gpu-tensorrt.ini +++ b/config-x86-gpu-tensorrt.ini @@ -21,15 +21,7 @@ MaxProcesses = 1 ;Encoder: videoconvert ! 
vaapih264enc DashboardURL = https://app.lanthorn.ai/ DashboardAuthorizationToken = -EnableSlackNotifications = no -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -40,18 +32,12 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. ; - CenterPointsDistance: compare center of pedestrian boxes together ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. ; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/config-x86-gpu.ini b/config-x86-gpu.ini index e5ca863d..622bf33c 100644 --- a/config-x86-gpu.ini +++ b/config-x86-gpu.ini @@ -20,13 +20,7 @@ MaxProcesses = 1 ;Encoder: videoconvert ! vaapih264enc DashboardURL = https://app.lanthorn.ai/ DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -37,18 +31,12 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. ; - CenterPointsDistance: compare center of pedestrian boxes together ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. ; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini index 027c6068..d4a9f2df 100644 --- a/config-x86-openvino.ini +++ b/config-x86-openvino.ini @@ -20,14 +20,7 @@ MaxProcesses = 2 ;Encoder: videoconvert ! 
vaapih264enc DashboardURL = https://app.lanthorn.ai/ DashboardAuthorizationToken = -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -38,18 +31,12 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. ; - CenterPointsDistance: compare center of pedestrian boxes together ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. ; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/config-x86.ini b/config-x86.ini index 002a2fb0..c3b89913 100644 --- a/config-x86.ini +++ b/config-x86.ini @@ -19,16 +19,8 @@ MaxProcesses = 1 ; WIP https://github.com/neuralet/neuralet/issues/91 ;Encoder: videoconvert ! vaapih264enc DashboardURL = https://app.lanthorn.ai/ -DashboardAuthorizationToken = -EnableSlackNotifications = no -SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) -OccupancyAlertsMinInterval = 180 +DashboardAuthorizationToken = MaxThreadRestarts = 5 -GlobalReportingEmails = -GlobalReportTime = 06:00 -DailyGlobalReport = False -WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics @@ -39,18 +31,12 @@ VideoPath = /repo/data/softbio_vid.mp4 Tags = kitchen Name = Garden-Camera Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 ; Distance measurement method: ; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. ; - CenterPointsDistance: compare center of pedestrian boxes together ; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. 
; - If left empty the DefaultDistMethod will be employed DistMethod = -DailyReport = False -DailyReportTime = 06:00 LiveFeedEnabled = True [Detector] diff --git a/libs/config_engine.py b/libs/config_engine.py index df2e8568..9a755139 100644 --- a/libs/config_engine.py +++ b/libs/config_engine.py @@ -4,8 +4,6 @@ import configparser import threading -from libs.notifications.slack_notifications import is_slack_configured -from libs.utils.mailing import is_mailing_configured from libs.utils import config as config_utils from libs.utils.loggers import get_source_log_directory from libs.entities.video_source import VideoSource @@ -145,31 +143,11 @@ def get_video_sources(self): sources = [] for title, section in self.config.items(): if title.startswith("Source_"): - is_slack_enabled = self.config["App"]["SlackChannel"] and is_slack_configured() - is_email_enabled = is_mailing_configured() config_dir = config_utils.get_source_config_directory(self) video_source_logs_dir = get_source_log_directory(self) - src = VideoSource(section, title, is_email_enabled, is_slack_enabled, config_dir, - video_source_logs_dir) + src = VideoSource(section, title, config_dir, video_source_logs_dir) sources.append(src) return sources except Exception: # Sources are invalid in config file. What should we do? raise RuntimeError("Invalid sources in config file") - - def should_send_email_notifications(self, entity): - if "emails" in entity: - if is_mailing_configured(): - return True - else: - self.logger.warning("Tried to enable email notifications but oauth2_cred.json is missing") - return False - - def should_send_slack_notifications(self, ent): - if self.config["App"]["SlackChannel"] and ent["enable_slack_notifications"]: - if is_slack_configured(): - return True - else: - self.logger.warning( - "Tried to enable slack notifications but slack_token.txt is either missing or unauthorized") - return False diff --git a/libs/entities/base_entity.py b/libs/entities/base_entity.py index b23f8d95..15f20e3d 100644 --- a/libs/entities/base_entity.py +++ b/libs/entities/base_entity.py @@ -4,8 +4,7 @@ class BaseEntity(): - def __init__(self, config_section: dict, section_title: str, send_email_enabled: bool, send_slack_enabled: bool, - config_dir: str, logs_dir: str): + def __init__(self, config_section: dict, section_title: str, config_dir: str, logs_dir: str): self.config_dir = config_dir self.section = section_title self.id = config_section["Id"] @@ -15,15 +14,6 @@ def __init__(self, config_section: dict, section_title: str, send_email_enabled: self.tags = config_section["Tags"].split(",") else: self.tags = [] - if "Emails" in config_section and config_section["Emails"].strip() != "": - self.emails = config_section["Emails"].split(",") - else: - self.emails = [] - self.enable_slack_notifications = config_to_boolean(config_section["EnableSlackNotifications"]) - self.notify_every_minutes = int(config_section["NotifyEveryMinutes"]) - self.violation_threshold = int(config_section["ViolationThreshold"]) - self.daily_report = config_to_boolean(config_section["DailyReport"]) - self.daily_report_time = config_section.get("DailyReportTime") or "06:00" def __getitem__(self, key): return self.__dict__[key] diff --git a/libs/entities/video_source.py b/libs/entities/video_source.py index 4a826fd1..74da8cb1 100644 --- a/libs/entities/video_source.py +++ b/libs/entities/video_source.py @@ -3,16 +3,8 @@ class VideoSource(BaseEntity): - def __init__(self, config_section: dict, section_title: str, send_email_enabled: bool, send_slack_enabled: 
bool, - config_dir: str, logs_dir: str): - super().__init__(config_section, section_title, send_email_enabled, send_slack_enabled, config_dir, logs_dir) + def __init__(self, config_section: dict, section_title: str, config_dir: str, logs_dir: str): + super().__init__(config_section, section_title, config_dir, logs_dir) self.type = "Camera" self.url = config_section["VideoPath"] self.dist_method = config_section["DistMethod"] - - if (self.notify_every_minutes > 0 and self.violation_threshold > 0): - self.should_send_email_notifications = send_email_enabled and self.emails != [] - self.should_send_slack_notifications = send_slack_enabled and self.enable_slack_notifications - else: - self.should_send_email_notifications = False - self.should_send_slack_notifications = False diff --git a/libs/notifications/__init__.py b/libs/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libs/notifications/slack_notifications.py b/libs/notifications/slack_notifications.py deleted file mode 100644 index 4a7ed653..00000000 --- a/libs/notifications/slack_notifications.py +++ /dev/null @@ -1,77 +0,0 @@ -import logging -import os -from slack import WebClient - -def is_slack_configured(): - if not os.path.exists("slack_token.txt"): - return False - with open("slack_token.txt", "r") as user_token: - value = user_token.read() - return bool(value) - -class SlackService: - def __init__(self, config): - self.config = config - self.logger = logging.getLogger(__name__) - with open("slack_token.txt", "r") as slack_token: - self.slack_token = slack_token.read() - self.slack_client = WebClient(token=self.slack_token) - self.channel = config.get_section_dict("App")["SlackChannel"] - self.username = "lanthorn" - self.icon_emoji = ":robot_face:" - - def post_message(self, msg, recipient): - return self.slack_client.chat_postMessage( - channel=recipient, - text=msg - ) - - def post_message_to_channel(self, msg, channel): - self.logger.info(f"Posting to {channel}") - return self.slack_client.chat_postMessage( - channel=channel, - text=msg, - username=self.username, - parse='full' - ) - - def file_upload(self, file_content, file_name, file_type, title=None): - return self.slack_client.files_upload( - channels=self.channel, - content=file_content, - filename=file_name, - filetype=file_type, - initial_comment='{} Log File'.format(file_name), - title=title - ) - - def user_info(self, uid): - return self.slack_client.users_info( - user=uid, - token=self.slack_token - ) - - def violation_report(self, entity_info, number): - entity_id, entity_type, entity_name = entity_info['id'], entity_info['type'], entity_info['name'] - msg = f"We found {number} violations in {entity_id}: {entity_name} ({entity_type})" - self.post_message_to_channel(msg, self.channel) - - def daily_report(self, entity_info, number): - entity_id, entity_type, entity_name = entity_info['id'], entity_info['type'], entity_info['name'] - msg = f"Yesterday we found {number} violations in {entity_id}: {entity_name} ({entity_type})." - self.post_message_to_channel(msg, self.channel) - - def occupancy_alert(self, entity_info, number, threshold): - entity_id, entity_type = entity_info['id'], entity_info['type'] - entity_name = entity_info['name'] - msg = f"Occupancy threshold was exceeded in {entity_type} {entity_id}: {entity_name}." \ - f"We found {number} people out of a capacity of {threshold}." 
- self.post_message_to_channel(msg, self.channel) - - def send_global_report(self, report_type, sources, sources_violations_per_hour): - msg = f"*{report_type.capitalize()} Report:* \n\n" - msg += "\n*Cameras:*\n" - for index, source in enumerate(sources): - entity_id, entity_name = source['id'], source['name'] - msg += f"*{entity_id}:* {entity_name} - {sum(sources_violations_per_hour[index])} Violations\n" - self.post_message_to_channel(msg, self.channel) diff --git a/libs/processor_core.py b/libs/processor_core.py index cb596eed..08422af6 100644 --- a/libs/processor_core.py +++ b/libs/processor_core.py @@ -6,7 +6,6 @@ from queue import Empty import schedule from libs.engine_threading import run_video_processing -from libs.utils.notifications import run_check_violations logger = logging.getLogger(__name__) logging.getLogger().setLevel(logging.INFO) @@ -42,22 +41,6 @@ def start(self): self._serve() logging.info("processor core has been terminated.") - def _setup_scheduled_tasks(self): - logger.info("Setup scheduled tasks") - sources = self.config.get_video_sources() - for src in sources: - should_send_email_notifications = src.should_send_email_notifications - should_send_slack_notifications = src.should_send_slack_notifications - if should_send_email_notifications or should_send_slack_notifications: - interval = src.notify_every_minutes - threshold = src.violation_threshold - schedule.every(interval).minutes.do( - run_check_violations, threshold, self.config, src, interval, - should_send_email_notifications, should_send_slack_notifications - ).tag("notification-task") - else: - logger.info(f"should not send notification for camera {src['id']}") - def _serve(self): logger.info("Core is listening for commands ... ") while True: @@ -77,7 +60,6 @@ def _handle_command(self, cmd_code): return self.config.reload() - self._setup_scheduled_tasks() self._tasks[Commands.PROCESS_VIDEO_CFG] = True self._start_processing() diff --git a/libs/reports/__init__.py b/libs/reports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libs/reports/notifications.py b/libs/reports/notifications.py deleted file mode 100644 index e7a88167..00000000 --- a/libs/reports/notifications.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import csv -import operator -import numpy as np -import pandas as pd -import logging - -from datetime import date, timedelta -from libs.notifications.slack_notifications import SlackService, is_slack_configured -from libs.utils.mailing import MailService, is_mailing_configured -from libs.utils.loggers import get_source_log_directory - - -logger = logging.getLogger(__name__) - - -def get_daily_report(config, entity_info, report_date): - entity_type = entity_info['type'] - all_violations_per_hour = [] - log_directory = get_source_log_directory(config) - - if entity_type == 'Camera': - reports_directory = os.path.join(log_directory, entity_info['id'], "reports") - daily_csv_file_paths = [ - os.path.join(reports_directory, '','report_' + report_date + '.csv') - ] - else: - raise NotImplementedError - - for file_path in daily_csv_file_paths: - violations_per_hour = [] - if not os.path.isfile(file_path): - violations_per_hour = list(np.zeros(24).astype(int)) - else: - with open(file_path, newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - violations_per_hour.append(int(row["DetectedObjects"]) - int(row["NoInfringement"])) - if not all_violations_per_hour: - all_violations_per_hour = violations_per_hour - else: - all_violations_per_hour = 
list(map(operator.add, all_violations_per_hour, violations_per_hour)) - return all_violations_per_hour - - -def send_daily_report_notification(config, entity_info): - yesterday = str(date.today() - timedelta(days=1)) - violations_per_hour = get_daily_report(config, entity_info, yesterday) - - if sum(violations_per_hour): - if is_mailing_configured() and entity_info['should_send_email_notifications']: - ms = MailService(config) - ms.send_daily_report(entity_info, sum(violations_per_hour), violations_per_hour) - if is_slack_configured() and entity_info['should_send_slack_notifications']: - slack_service = SlackService(config) - slack_service.daily_report(entity_info, sum(violations_per_hour)) - - -def send_global_report(report_type, config, sources, sources_violations_per_hour): - emails = config.get_section_dict("App")["GlobalReportingEmails"].split(",") - if is_mailing_configured() and emails: - ms = MailService(config) - ms.send_global_report(report_type, sources, sources_violations_per_hour) - if is_slack_configured(): - slack_service = SlackService(config) - slack_service.send_global_report(report_type, sources, sources_violations_per_hour) - - -def send_daily_global_report(config, sources): - yesterday = str(date.today() - timedelta(days=1)) - sources_violations_per_hour = [get_daily_report(config, source, yesterday) for source in sources] - send_global_report('daily', config, sources, sources_violations_per_hour) - - -def send_weekly_global_report(config, sources): - weekly_sources_violations_per_hour = np.zeros((len(sources), 24)) - start_week = str(date.today() - timedelta(days=8)) - yesterday = str(date.today() - timedelta(days=1)) - date_range = pd.date_range(start=start_week, end=yesterday) - for report_date in date_range: - weekly_sources_violations_per_hour += np.array( - [get_daily_report(config, source, report_date.strftime('%Y-%m-%d')) for source in sources]) - send_global_report('weekly', config, sources, weekly_sources_violations_per_hour) diff --git a/libs/utils/_global_entity_report.html b/libs/utils/_global_entity_report.html deleted file mode 100644 index 94f01f5a..00000000 --- a/libs/utils/_global_entity_report.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

-[18-line HTML fragment removed: a per-entity report block reading "We found {detections} violations in {entity_name} ({entity_type})." followed by an Hour / # of Violations table filled from {violations_per_hour}]
diff --git a/libs/utils/mail_daily_report.html b/libs/utils/mail_daily_report.html
deleted file mode 100644
index 645dbac2..00000000
--- a/libs/utils/mail_daily_report.html
+++ /dev/null
@@ -1,83 +0,0 @@
-[83-line HTML email template removed; recoverable text: "Yesterday we found {detections} violations in {entity_name} ({entity_type}).", an Hour / # of Violations table filled from {violations_per_hour}, and a footer "You can see more information or adjust this settings on: {url}"]
diff --git a/libs/utils/mail_global_report.html b/libs/utils/mail_global_report.html
deleted file mode 100644
index dadf2e71..00000000
--- a/libs/utils/mail_global_report.html
+++ /dev/null
@@ -1,74 +0,0 @@
-[74-line HTML email template removed; recoverable text: a "Cameras:" section filled from {global_cameras_report} and a footer "You can see more information or adjust this settings on: {url}"]
diff --git a/libs/utils/mail_occupancy_notification.html b/libs/utils/mail_occupancy_notification.html
deleted file mode 100644
index 5137c37e..00000000
--- a/libs/utils/mail_occupancy_notification.html
+++ /dev/null
@@ -1,65 +0,0 @@
-[65-line HTML email template removed; recoverable text: "Occupancy threshold was exceeded in {entity_type} {entity_id}: {entity_name}.", "We found {num_occupancy} people out of a capacity of {entity_threshold}.", and a footer "You can see more information or adjust this settings on: {url}"]
diff --git a/libs/utils/mail_violations_notification.html b/libs/utils/mail_violations_notification.html
deleted file mode 100644
index 1028cddc..00000000
--- a/libs/utils/mail_violations_notification.html
+++ /dev/null
@@ -1,61 +0,0 @@
-[61-line HTML email template removed; recoverable text: "We found {detections} violations in {entity_name} ({entity_type})" and a footer "You can see more information or adjust this settings on: {url}"]
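The templates deleted above were plain HTML skeletons whose `{placeholder}` tokens the mailing code (removed next, see `fill_report_table`) filled via repeated `str.replace`. A minimal, self-contained sketch of that substitution scheme; the template string below is a stand-in for illustration, not one of the deleted files:

```python
# Minimal sketch of the {placeholder} substitution used by the deleted
# templates (mirrors the str.replace chains in the mailing.py diff below).
# The template string is a stand-in, not one of the real HTML files.

def render_template(template: str, values: dict) -> str:
    """Replace every {key} token in the template with its string value."""
    for key, value in values.items():
        template = template.replace("{" + key + "}", str(value))
    return template


if __name__ == "__main__":
    template = "We found {detections} violations in {entity_name} ({entity_type})."
    print(render_template(
        template,
        {"detections": 3, "entity_name": "Kitchen", "entity_type": "Camera"},
    ))
    # -> We found 3 violations in Kitchen (Camera).
```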
- diff --git a/libs/utils/mailing.py b/libs/utils/mailing.py deleted file mode 100644 index fae5af80..00000000 --- a/libs/utils/mailing.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import yagmail -import logging -import codecs -from dotenv import load_dotenv -load_dotenv() - -def send_email(from_email, receiver, subject, contents, attachments=None): - with yagmail.SMTP(from_email, oauth2_file="oauth2_cred.json") as yag: - yag.send( - to=receiver, - subject=subject, - contents=contents, - attachments=attachments, - newline_to_break=False - ) - - -def is_mailing_configured(): - if os.path.isfile("oauth2_cred.json"): - return True - else: - return False - - -class MailService: - - def __init__(self, config): - self.config = config - self.email_from = os.getenv("NOTIFICATION_EMAIL_FROM") - self.logger = logging.getLogger(__name__) - - def send_email_notification(self, entity_info, subject, content): - if not entity_info.emails: - self.logger.info("No notification was emailed because no email was added for selected source") - return - to = entity_info.emails - send_email(self.email_from, to, subject, content) - self.logger.info(f"Sent notification email to {to}") - - def send_violation_notification(self, entity_info, num_violations): - entity_type = entity_info.type - frontend_url = self.config.get_section_dict("App")["DashboardURL"] - with codecs.open('libs/utils/mail_violations_notification.html', 'r') as f: - html_string = f.read() - html_string = html_string.replace('{detections}', str(num_violations)) - html_string = html_string.replace('{entity_type}', entity_type) - html_string = html_string.replace('{entity_name}', entity_info.name) - # TODO: Fix this - html_string = html_string.replace('{url}', f'{frontend_url}/dashboard?source=email') - subject = f"[Lanthorn] Violation Report on {entity_info.name} ({entity_type})" - self.send_email_notification(entity_info, subject, html_string) - - - def fill_report_table(self, html_path, entity_info, num_violations, hours_sumary): - with codecs.open(html_path, 'r') as f: - html_string = f.read() - html_string = html_string.replace('{detections}', str(num_violations)) - violations_per_hour = "" - for hour, hour_violation in enumerate(hours_sumary): - violations_per_hour += f"{hour}:00{hour_violation}" - html_string = html_string.replace('{violations_per_hour}', violations_per_hour) - html_string = html_string.replace('{entity_type}', entity_info.type) - html_string = html_string.replace('{entity_name}', entity_info.name) - return html_string - - - def send_daily_report(self, entity_info, num_violations, hours_sumary): - entity_type = entity_info.type - frontend_url = self.config.get_section_dict("App")["DashboardURL"] - html_string = self.fill_report_table("libs/utils/mail_daily_report.html", entity_info, num_violations, hours_sumary) - html_string = html_string.replace('{url}', f'{frontend_url}/dashboard?source=email') - subject = f"[Lanthorn] Daily Report on {entity_type}: {entity_info.name}" - self.send_email_notification(entity_info, subject, html_string) - - def send_occupancy_notification(self, entity_info, num_occupancy, threshold): - entity_id, entity_type = entity_info.id, entity_info.type - entity_name = entity_info.name - frontend_url = self.config.get_section_dict("App")["DashboardURL"] - with codecs.open('libs/utils/mail_occupancy_notification.html', 'r') as f: - html_string = f.read() - html_string = html_string.replace('{num_occupancy}', str(num_occupancy)) - html_string = html_string.replace('{entity_id}', entity_id) - html_string = 
html_string.replace('{entity_type}', entity_type) - html_string = html_string.replace('{entity_name}', entity_name) - html_string = html_string.replace('{entity_threshold}', str(threshold)) - html_string = html_string.replace('{url}', f'{frontend_url}/dashboard?source=email') - subject = f"[Lanthorn] Occupancy Alert on {entity_name} ({entity_type})" - self.send_email_notification(entity_info, subject, html_string) - - - def send_global_report(self, report_type, sources, sources_violations_per_hour): - frontend_url = self.config.get_section_dict("App")["DashboardURL"] - with codecs.open('libs/utils/mail_global_report.html', 'r') as f: - html_string = f.read() - cameras_report = "" - for index, source in enumerate(sources): - cameras_report += self.fill_report_table( - "libs/utils/_global_entity_report.html", source, - sum(sources_violations_per_hour[index]), sources_violations_per_hour[index] - ) - html_string = html_string.replace('{global_cameras_report}', cameras_report) - html_string = html_string.replace('{url}', f'{frontend_url}/dashboard?source=email') - subject = f"[Lanthorn] Global {report_type.capitalize()} Report" - to = self.config.get_section_dict("App")["GlobalReportingEmails"].split(",") - send_email(self.email_from, to, subject, html_string) - self.logger.info(f"Sent notification email to {to}") diff --git a/libs/utils/notifications.py b/libs/utils/notifications.py deleted file mode 100644 index 0a899a2a..00000000 --- a/libs/utils/notifications.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import csv -from datetime import date, datetime -from threading import Thread -from .mailing import MailService -from .loggers import get_source_log_directory -from ..notifications.slack_notifications import SlackService - - -def get_violations(file_path, interval): - now = datetime.today() - violations = 0 - with open(file_path, 'r', newline='') as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - row_time = datetime.strptime(row['Timestamp'], "%Y-%m-%d %H:%M:%S") - if ((now - row_time).seconds / 60) < interval: - violations += int(row['ViolatingObjects']) - return violations - - -# Some vars are only used to pass through to mail service/ Maybe this could be refactored. 
-def check_violations(entity_type, threshold, config, entity_info, interval, should_send_email, should_send_slack): - log_dir = get_source_log_directory(config) - today = str(date.today()) - - violations = 0 - if entity_type == 'Camera': - file_paths = [os.path.join(log_dir, entity_info.id, "objects_log", today + ".csv")] - else: - # entity_type == 'Area' - raise NotImplementedError - - for file_path in file_paths: - violations += get_violations(file_path, interval) - - if violations > threshold: - # send notification - if should_send_email: - ms = MailService(config) - ms.send_violation_notification(entity_info, violations) - if should_send_slack: - slack_service = SlackService(config) - slack_service.violation_report(entity_info, violations) - - -def run_check_violations(threshold, config, entity_info, interval, should_send_email, should_send_slack): - entity_type = entity_info.type - job_thread = Thread(target=check_violations, - args=[entity_type, threshold, config, entity_info, interval, should_send_email, should_send_slack]) - job_thread.start() diff --git a/run_periodic_task.py b/run_periodic_task.py index 0022c072..b07a130c 100644 --- a/run_periodic_task.py +++ b/run_periodic_task.py @@ -5,8 +5,6 @@ from libs.backups.s3_backup import raw_data_backup from libs.config_engine import ConfigEngine -from libs.reports.notifications import (send_daily_report_notification, send_daily_global_report, - send_weekly_global_report) logger = logging.getLogger(__name__) @@ -33,21 +31,6 @@ def main(config): else: raise ValueError(f"Not supported periodic task named: {task_name}") - # Schedule daily/weekly reports for sources - sources = config.get_video_sources() - for src in sources: - if src['daily_report']: - schedule.every().day.at(src['daily_report_time']).do( - send_daily_report_notification, config=config, entity_info=src) - if config.get_boolean("App", "DailyGlobalReport"): - schedule.every().day.at(config.get_section_dict("App")["GlobalReportTime"]).do( - send_daily_global_report, config=config, sources=sources - ) - if config.get_boolean("App", "WeeklyGlobalReport"): - schedule.every(7).days.at(config.get_section_dict("App")["GlobalReportTime"]).do( - send_weekly_global_report, config=config, sources=sources - ) - while True: schedule.run_pending() time.sleep(10) From d6f38cc8f047ec559a40f794a4b89369d1ea7606 Mon Sep 17 00:00:00 2001 From: Pablo Grill Date: Fri, 30 Jul 2021 14:34:44 -0300 Subject: [PATCH 4/7] Remove in_out and roi configurations. 
--- api/models/camera.py | 40 +------ api/models/export.py | 5 - api/routers/cameras.py | 110 +----------------- libs/loggers/source_loggers/video_logger.py | 6 - .../objects_filtering.py | 63 ---------- libs/utils/in_out.py | 88 -------------- 6 files changed, 5 insertions(+), 307 deletions(-) delete mode 100644 libs/utils/in_out.py diff --git a/api/models/camera.py b/api/models/camera.py index 05e221bf..f706d310 100644 --- a/api/models/camera.py +++ b/api/models/camera.py @@ -2,7 +2,7 @@ import cv2 as cv from pydantic import BaseModel, Field, validator -from typing import List, Optional, Tuple +from typing import List, Optional from .base import EntityConfigDTO, SnakeModel @@ -14,8 +14,6 @@ class CameraDTO(EntityConfigDTO): distMethod: Optional[str] = Field("", example='CenterPointsDistance') liveFeedEnabled: bool = Field(True, example=True) hasBeenCalibrated: bool = Field(False, example=False) - hasDefinedRoi: bool = Field(False, example=False) - hasInOutBorder: bool = Field(False, example=False) class CreateCameraDTO(CameraDTO): @@ -56,39 +54,3 @@ class Config: class VideoLiveFeedModel(BaseModel): enabled: bool - - -class ContourRoI(BaseModel): - contour_roi: List[Tuple[int, int]] - - class Config: - schema_extra = { - 'example': { - 'contour_roi': [[88, 58], [90, 284], [279, 284], [281, 58]] - } - } - - -class InOutBoundary(BaseModel): - name: Optional[str] = Field("", example="Left Door") - in_out_boundary: Tuple[Tuple[int, int], Tuple[int, int]] - - -class InOutBoundaries(BaseModel): - in_out_boundaries: List[InOutBoundary] - - class Config: - schema_extra = { - "example": { - "in_out_boundaries": [ - { - "name": "Left Door", - "in_out_boundary": [[5, 5], [5, 240]] - }, - { - "name": "Right Door", - "in_out_boundary": [[280, 5], [280, 240]] - }, - ] - } - } diff --git a/api/models/export.py b/api/models/export.py index bfa7d1e4..223307ef 100644 --- a/api/models/export.py +++ b/api/models/export.py @@ -11,11 +11,6 @@ class ExportDataType(str, Enum): raw_data = "raw_data" - occupancy = "occupancy" - social_distancing = "social-distancing" - facemask_usage = "facemask-usage" - in_out = "in-out" - dwell_time = "dwell-time" all_data = "all_data" diff --git a/api/routers/cameras.py b/api/routers/cameras.py index e8138f99..0a6b6b0c 100644 --- a/api/routers/cameras.py +++ b/api/routers/cameras.py @@ -4,8 +4,6 @@ import os import shutil import re -import json -import numpy as np from fastapi import APIRouter, status from starlette.exceptions import HTTPException @@ -17,12 +15,10 @@ from api.settings import Settings from api.utils import ( - extract_config, get_config, handle_response, restart_processor, - update_config, map_section_from_config, map_to_config_file_format, bad_request_serializer + extract_config, get_config, handle_response, update_config, map_section_from_config, + map_to_config_file_format, bad_request_serializer ) -from api.models.camera import (CameraDTO, CamerasListDTO, CreateCameraDTO, ImageModel, VideoLiveFeedModel, - ContourRoI, InOutBoundaries) -from libs.source_post_processors.objects_filtering import ObjectsFilteringPostProcessor +from api.models.camera import CameraDTO, CamerasListDTO, CreateCameraDTO, ImageModel, VideoLiveFeedModel from libs.utils.utils import validate_file_exists_and_is_not_empty logger = logging.getLogger(__name__) @@ -42,8 +38,6 @@ def map_camera(camera_name, config, options=[]): camera_dict["image"] = image_string calibration_file_path = get_camera_calibration_path(settings.config, camera_id) camera_dict["has_been_calibrated"] = 
validate_file_exists_and_is_not_empty(calibration_file_path) - roi_file_path = ObjectsFilteringPostProcessor.get_roi_file_path(camera_id, settings.config) - camera_dict["has_defined_roi"] = validate_file_exists_and_is_not_empty(roi_file_path) return camera_dict @@ -84,6 +78,7 @@ def get_camera_default_image_string(camera_id): with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()) + def reestructure_cameras(config_dict): """Ensure that all [Source_0, Source_1, ...] are consecutive""" source_names = [x for x in config_dict.keys() if x.startswith("Source_")] @@ -305,100 +300,3 @@ async def enable_video_live_feed(camera_id: str, disable_other_cameras: Optional config_dict[f"Source_{index}"]["LiveFeedEnabled"] = "True" success = update_config(config_dict, True) return handle_response(None, success, status.HTTP_204_NO_CONTENT) - - -@cameras_router.get("/{camera_id}/roi_contour") -async def get_roi_contour(camera_id: str): - """ - Get the contour of the RoI - """ - validate_camera_existence(camera_id) - roi_file_path = ObjectsFilteringPostProcessor.get_roi_file_path(camera_id, settings.config) - roi_contour = ObjectsFilteringPostProcessor.get_roi_contour(roi_file_path) - if roi_contour is None: - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"There is no defined RoI for {camera_id}") - return roi_contour.tolist() - - -@cameras_router.put("/{camera_id}/roi_contour", status_code=status.HTTP_201_CREATED) -async def add_or_replace_roi_contour(camera_id: str, body: ContourRoI, reboot_processor: Optional[bool] = True): - """ - Define a RoI for a camera or replace its current one. - A RoI is defined by a vector of [x,y] 2-tuples, that map to coordinates in the image. - """ - validate_camera_existence(camera_id) - roi_file_path = ObjectsFilteringPostProcessor.get_roi_file_path(camera_id, settings.config) - dir_path = Path(roi_file_path).parents[0] - Path(dir_path).mkdir(parents=True, exist_ok=True) - roi_contour = np.array(body.contour_roi, dtype=int) - np.savetxt(roi_file_path, roi_contour, delimiter=',', fmt='%i') - restart_processor() if reboot_processor else True - return roi_contour.tolist() - - -@cameras_router.delete("/{camera_id}/roi_contour") -async def remove_roi_contour(camera_id: str, reboot_processor: Optional[bool] = True): - """ - Delete the defined RoI for a camera. - """ - validate_camera_existence(camera_id) - roi_file_path = ObjectsFilteringPostProcessor.get_roi_file_path(camera_id, settings.config) - if not validate_file_exists_and_is_not_empty(roi_file_path): - detail = f"There is no defined RoI for {camera_id}" - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=detail) - os.remove(roi_file_path) - success = restart_processor() if reboot_processor else True - return handle_response(None, success, status.HTTP_204_NO_CONTENT) - - -@cameras_router.get("/{camera_id}/in_out_boundaries") -async def get_in_out_boundaries(camera_id: str): - """ - Get the In/Out Boundaries for a camera. - Each In/Out boundary in the list is represented by a name and: - Two coordinates `[x,y]` are given in 2-tuples `[A,B]`. These points form a **line**. - - If someone crosses the **line** while having **A** to their right, they are going in the `in` direction (entering). - - Crossing the **line** while having **A** to their left means they are going in the `out` direction (leaving). 
- """ - validate_camera_existence(camera_id) - in_out_file_path = InOutMetric.get_in_out_file_path(camera_id, settings.config) - in_out_boundaries = InOutMetric.read_in_out_boundaries(in_out_file_path) - if in_out_boundaries is None: - error_detail = f"There is no defined In/Out Boundary for {camera_id}" - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=error_detail) - return InOutBoundaries(**dict(in_out_boundaries)) - - -@cameras_router.put("/{camera_id}/in_out_boundaries", status_code=status.HTTP_201_CREATED) -async def add_or_replace_in_out_boundaries(camera_id: str, body: InOutBoundaries, reboot_processor: Optional[bool] = True): - """ - Create or replace the In/Out boundaries for a camera. - Each In/Out boundary in the list is represented by a name and: - Two coordinates `[x,y]` are given in 2-tuples `[A,B]`. These points form a **line**. - - If someone crosses the **line** while having **A** to their right, they are going in the `in` direction (entering). - - Crossing the **line** while having **A** to their left means they are going in the `out` direction (leaving). - """ - validate_camera_existence(camera_id) - in_out_file_path = InOutMetric.get_in_out_file_path(camera_id, settings.config) - dir_path = Path(in_out_file_path).parents[0] - Path(dir_path).mkdir(parents=True, exist_ok=True) - in_out_boundaries = body.dict() - with open(in_out_file_path, "w") as outfile: - json.dump(in_out_boundaries, outfile) - restart_processor() if reboot_processor else True - return body - - -@cameras_router.delete("/{camera_id}/in_out_boundaries") -async def remove_in_out_boundaries(camera_id: str, reboot_processor: Optional[bool] = True): - """ - Delete the defined In/Out boundaries for a camera. - """ - validate_camera_existence(camera_id) - in_out_file_path = InOutMetric.get_in_out_file_path(camera_id, settings.config) - if not validate_file_exists_and_is_not_empty(in_out_file_path): - detail = f"There is no defined In/Out Boundary for {camera_id}" - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=detail) - os.remove(in_out_file_path) - success = restart_processor() if reboot_processor else True - return handle_response(None, success, status.HTTP_204_NO_CONTENT) diff --git a/libs/loggers/source_loggers/video_logger.py b/libs/loggers/source_loggers/video_logger.py index c732a4ba..cf512305 100644 --- a/libs/loggers/source_loggers/video_logger.py +++ b/libs/loggers/source_loggers/video_logger.py @@ -5,7 +5,6 @@ from libs.detectors.utils.ml_model_functions import get_model_json_file_or_return_default_values from libs.utils import visualization_utils -from libs.source_post_processors.objects_filtering import ObjectsFilteringPostProcessor class VideoLogger: @@ -19,7 +18,6 @@ def __init__(self, config, source: str, logger: str): self.out_birdseye = None self.live_feed_enabled = self.config.get_boolean(source, "LiveFeedEnabled") self.track_hist = dict() - self.roi_file_path = ObjectsFilteringPostProcessor.get_roi_file_path(self.camera_id, self.config) def start_logging(self, fps): if not self.live_feed_enabled: @@ -102,10 +100,6 @@ def update(self, cv_image, objects, post_processing_data, fps, log_time): self.camera_id )["variables"]["ClassID"] ) - roi_contour = ObjectsFilteringPostProcessor.get_roi_contour(self.roi_file_path) - if roi_contour is not None: - color = (41, 127, 255) # #ff7f29 (255, 127, 41) - visualization_utils.draw_contour(cv_image, roi_contour, color) output_dict = visualization_utils.visualization_preparation(objects, distancings, dist_threshold) 
category_index = {class_id: { diff --git a/libs/source_post_processors/objects_filtering.py b/libs/source_post_processors/objects_filtering.py index 4e422366..b55e02af 100644 --- a/libs/source_post_processors/objects_filtering.py +++ b/libs/source_post_processors/objects_filtering.py @@ -1,11 +1,5 @@ -import os -import cv2 as cv import numpy as np -from pathlib import Path - -from ..utils.config import get_source_config_directory -from ..utils.utils import validate_file_exists_and_is_not_empty class ObjectsFilteringPostProcessor: @@ -15,9 +9,6 @@ def __init__(self, config, source: str, post_processor: str): self.overlap_threshold = float( self.config.get_section_dict(post_processor)["NMSThreshold"] ) - camera_id = config.get_section_dict(source)["Id"] - roi_file_path = self.get_roi_file_path(camera_id, config) - self.roi_contour = self.get_roi_contour(roi_file_path) @staticmethod def ignore_large_boxes(object_list): @@ -92,63 +83,9 @@ def non_max_suppression_fast(object_list, overlapThresh): updated_object_list = [j for i, j in enumerate(object_list) if i in pick] return updated_object_list - @staticmethod - def is_inside_roi(detected_object, roi_contour): - """ - An object is inside the RoI if its middle bottom point lies inside it. - params: - detected_object: a dictionary, that has attributes of a detected object such as "id", - "centroid" (a tuple of the normalized centroid coordinates (cx,cy,w,h) of the box), - "bbox" (a tuple of the normalized (xmin,ymin,xmax,ymax) coordinate of the box), - "centroidReal" (a tuple of the centroid coordinates (cx,cy,w,h) of the box) and - "bboxReal" (a tuple of the (xmin,ymin,xmax,ymax) coordinate of the box) - - roi_contour: An array of 2-tuples that compose the contour of the RoI - returns: - True of False: Depending if the objects coodinates are inside the RoI - """ - corners = detected_object["bboxReal"] - x1, x2 = int(corners[0]), int(corners[2]) - y1, y2 = int(corners[1]), int(corners[3]) - if cv.pointPolygonTest(roi_contour, (x1 + (x2-x1)/2, y2), False) >= 0: - return True - return False - - @classmethod - def ignore_objects_outside_roi(cls, objects_list, roi_contour): - - """ - If a Region of Interest is defined, filer boxes which middle bottom point lies outside the RoI. - params: - object_list: a list of dictionaries. each dictionary has attributes of a detected object such as - "id", "centroid" (a tuple of the normalized centroid coordinates (cx,cy,w,h) of the box) and "bbox" (a tuple - of the normalized (xmin,ymin,xmax,ymax) coordinate of the box) - - roi_contour: An array of 2-tuples that compose the contour of the RoI - returns: - object_list: input object list with only the objets that fall under the Region of Interest. 
- """ - - return [obj for obj in objects_list if cls.is_inside_roi(obj, roi_contour)] - - @staticmethod - def get_roi_file_path(camera_id, config): - """ Returns the path to the roi_contour file """ - return f"{get_source_config_directory(config)}/{camera_id}/roi_filtering/roi_contour.csv" - - @staticmethod - def get_roi_contour(roi_file_path): - """ Given the path to the roi file it loads it and returns it """ - if validate_file_exists_and_is_not_empty(roi_file_path): - return np.loadtxt(roi_file_path, delimiter=',', dtype=int) - else: - return None - def filter_objects(self, objects_list): new_objects_list = self.ignore_large_boxes(objects_list) new_objects_list = self.non_max_suppression_fast(new_objects_list, self.overlap_threshold) - if self.roi_contour is not None: - new_objects_list = self.ignore_objects_outside_roi(new_objects_list, self.roi_contour) return new_objects_list def process(self, cv_image, objects_list, post_processing_data): diff --git a/libs/utils/in_out.py b/libs/utils/in_out.py deleted file mode 100644 index 2a8bef8b..00000000 --- a/libs/utils/in_out.py +++ /dev/null @@ -1,88 +0,0 @@ -import numpy as np -from numpy import linalg as LA - - -# Auxiliary methods taken from: -# https://github.com/yas-sim/object-tracking-line-crossing-area-intrusion/blob/master/object-detection-and-line-cross.py -def check_line_cross(boundary_line, trajectory): - """ - Args: - boundary_line: Two coordinates [x,y] are in 2-tuples [A,B] - Boundaries of the in/out line. - If someone crosses the line while having A to their right, they are going in the in direction (entering) - Crossing the line while having A to their left means they are going in the out direction (leaving) - trajectory: vector ((x1, y1), (x2, y2)) - - Returns: - (in, out) : tuple - (1, 0) - if the trajectory crossed the boundary entering (in) - (0, 1) - if the trajectory crossed the boundary leaving (out) - (0, 0) - if the trajectory didn't cross the boundary. 
- """ - traj_p0 = (trajectory[0][0], trajectory[0][1]) # Trajectory of an object - traj_p1 = (trajectory[1][0], trajectory[1][1]) - b_line_p0 = (boundary_line[0][0], boundary_line[0][1]) # Boundary line - b_line_p1 = (boundary_line[1][0], boundary_line[1][1]) - intersect = check_intersect(traj_p0, traj_p1, b_line_p0, b_line_p1) # Check if intersect or not - if intersect == False: - return 0, 0 - - angle = calc_vector_angle(traj_p0, traj_p1, b_line_p0, b_line_p1) # Calculate angle between trajectory and boundary line - if angle < 180: # in - return 1, 0 - else: # out - return 0, 1 - -def check_intersect(p1, p2, p3, p4): - """ - Check if the line p1-p2 intersects the line p3-p4 - Args: - p1: (x,y) - p2: (x,y) - p3: (x,y) - p4: (x,y) - - Returns: - boolean : True if intersection occurred - """ - tc1 = (p1[0] - p2[0]) * (p3[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p3[0]) - tc2 = (p1[0] - p2[0]) * (p4[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p4[0]) - td1 = (p3[0] - p4[0]) * (p1[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p1[0]) - td2 = (p3[0] - p4[0]) * (p2[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p2[0]) - return tc1 * tc2 < 0 and td1 * td2 < 0 - -def calc_vector_angle(line1_p1, line1_p2, line2_p1, line2_p2): - """ - Calculate the and return the angle made by two line segments line1(p1)-(p2), line2(p1)-(p2) - Args: - line1_p1: (x,y) - line1_p2: (x,y) - line2_p1: (x,y) - line2_p2: (x,y) - - Returns: - angle : [0, 360) - """ - u = np.array(line_vectorize(line1_p1, line1_p2)) - v = np.array(line_vectorize(line2_p1, line2_p2)) - i = np.inner(u, v) - n = LA.norm(u) * LA.norm(v) - c = i / n - a = np.rad2deg(np.arccos(np.clip(c, -1.0, 1.0))) - if u[0] * v[1] - u[1] * v[0] < 0: - return a - else: - return 360 - a - -def line_vectorize(point1, point2): - """ - Args: - point1: (x,y) - point2: (x,y) - - Returns: - The vector of intersecting the points with a line line(point1)-(point2) - """ - a = point2[0] - point1[0] - b = point2[1] - point1[1] - return [a, b] From cc785338fb493dbdb5e536aee620946f6c1622af Mon Sep 17 00:00:00 2001 From: Pablo Grill Date: Mon, 2 Aug 2021 10:17:05 -0300 Subject: [PATCH 5/7] Changing the README. --- README.md | 74 +++++++++++-------------------------------------------- 1 file changed, 14 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index cd15ce95..e7f5b500 100644 --- a/README.md +++ b/README.md @@ -15,23 +15,23 @@ - [Supported video feeds formats](#supported-video-feeds-formats) - [Change the default configuration](#change-the-default-configuration) - [API usage](#api-usage) - - [Interacting with the processors' generated information](#interacting-with-the-processors-generated-information) - [Issues and Contributing](#issues-and-contributing) - [Contact Us](#contact-us) - [License](#license) ## Introduction -Smart Distancing is an open-source application to quantify social distancing measures using edge computer vision systems. Since all computation runs on the device, it requires minimal setup and minimizes privacy and security concerns. It can be used in retail, workplaces, schools, construction sites, healthcare facilities, factories, etc. +Smart Distancing is an open-source application to quantify social distancing, facemask usage and occupancy measures using edge computer vision systems. Since all video computation runs on the device, it requires minimal setup and minimizes privacy and security concerns. It can be used in retail, workplaces, schools, construction sites, healthcare facilities, factories, etc.
-You can run this application on edge devices such as NVIDIA's Jetson Nano / TX2 or Google's Coral Edge-TPU. This application measures social distancing rates and gives proper notifications each time someone ignores social distancing rules. By generating and analyzing data, this solution outputs statistics about high-traffic areas that are at high risk of exposure to COVID-19 or any other contagious virus.
+You can run this application on edge devices such as NVIDIA's Jetson Nano / TX2 or Google's Coral Edge-TPU. This application measures social distancing rates and detects if someone ignores social distancing rules.
 
-If you want to understand more about the architecture you can read the following [post](https://neuralet.com/article/smart-social-distancing/).
+Also, you can connect multiple processors to Lanthorn's cloud dashboard and generate interesting reports or emit notifications that will help you analyze your data and output statistics about high-traffic areas that are at high risk of exposure to COVID-19 or any other contagious virus.
+If you want to understand more about the architecture you can read the following [post](https://neuralet.com/article/smart-social-distancing/).
 
 Please join [our slack channel](https://join.slack.com/t/neuralet/shared_invite/zt-g1w9o45u-Y4R2tADwdGBCruxuAAKgJA) or reach out to covid19project@neuralet.com if you have any questions.
@@ -446,6 +448,8 @@ Please note that the bash script may require permissions to execute (run `chmod
 If you are running the processor directly from the Docker Hub repository, remember to copy/paste the script in the execution folder before adding the flag ``` -e TZ=`./timezone.sh` ```.
 
+If you are using the Lanthorn cloud dashboard for processing the metrics, remember to put **all the processors in the same time zone**; otherwise, the aggregated results will be wrong.
+
 #### Persisting changes
 
 We recommend adding the projects folder as a mounted volume (`-v "$PWD":/repo`) if you are building the docker image. If you are using the already built one we recommend creating a directory named `data` and mount it (`-v $PWD/data:/repo/data`).
@@ -519,6 +521,7 @@ Please note that:
 
 If you want to integrate an IP camera that uses a private protocol, you should check with the camera provider if the device supports exporting its stream in a public protocol. For example, [WYZE](https://wyze.com/) doesn't support RTSP as default, but [you have the possibility of installing a firmware that supports it](https://wyzelabs.zendesk.com/hc/en-us/articles/360026245231-Wyze-Cam-RTSP).
+ Same goes for [Google Nest Cameras](https://developers.google.com/nest/device-access/traits/device/camera-live-stream), although here a token must be kept alive to access the RTSP stream.
 
 ### Change the default configuration
 
@@ -547,10 +550,8 @@ All the configurations are grouped in *sections* and some of them can vary depen
   - `Resolution`: Specifies the image resolution that the whole processor will use. If you are using a single camera we recommend using that resolution.
   - `Encoder`: Specifies the video encoder used by the processing pipeline.
   - `MaxProcesses`: Defines the number of processes executed in the processor. If you are using multiple cameras per processor we recommend increasing this number.
-  - `DashboardURL`: Sets the url where the frontend is running. Unless you are using a custom domain, you should keep this value as https://app.lanthorn.ai/.
+  - `DashboardURL`: Sets the url where the cloud dashboard is running.
Unless you are using a custom domain, you should keep this value as https://app.lanthorn.ai/.
   - `DashboardAuthorizationToken`: Configures the Authorization header required to sync the processor and the dashboard.
-  - `SlackChannel`: Configures the slack channel used by the notifications. The chosen slack channel must exist in the configured workspace.
-  - `OccupancyAlertsMinInterval`: Sets the desired interval (in seconds) between occupancy alerts.
   - `MaxThreadRestarts`: Defines the number of restarts allowed per thread.
   - `HeatmapResolution`: Sets the resolution used by the heatmap report.
   - `LogPerformanceMetrics`: A boolean parameter to enable/disable the logging of "Performance Metrics" in the default processor log.
@@ -581,19 +582,6 @@ All the configurations are grouped in *sections* and some of them can vary depen
   - `QueuePort`: Sets the port of the *QueueManager* (inside docker).
   - `QueueAuthKey`: Configures the auth key required to interact with the *QueueManager*.
 
-- `[Area_N]`:
-
-  A single processor can manage multiple areas and all of them must be configured in the config file. You can generate this configuration in 3 different ways: directly in the config file, using the [UI](https://app.lanthorn.ai) or using the API.
-  - `Id`: A string parameter to identify each area. This value must be *unique*.
-  - `Name`: A string parameter to name each area. Although you can repeat the same name in multiple areas, we recommend don't do that.
-  - `Cameras`: Configures the cameras (using the *ids*) included in the area. If you are configuring multiple cameras you should write the ids separated by commas. Each area should have at least one camera.
-  - `NotifyEveryMinutes` and `ViolationThreshold`: Defines the *period of time* and *number of social distancing violations* desired to send notifications. For example, if you want to notify when *occurs more than 10 violations every 15 minutes*, you must set `NotifyEveryMinutes` in 15 and `ViolationThreshold` in 10.
-  - `Emails`: Defines the emails list to receive the notification. Multiple emails can be written separating them by commas.
-  - `EnableSlackNotifications`: A boolean parameter to enable/disable the Slack integration for notifications and daily reports. We recommend not editing this parameter directly and manage it from the [UI](https://app.lanthorn.ai) to configure your workspace correctly.
-  - `OccupancyThreshold`: Defines the occupancy violation threshold. For example, if you want to notify when *there is more than 20 persons in the area* you must set `OccupancyThreshold` in 20.
-  - `DailyReport`: When the parameter is set in *True*, the information of the previous day is sent in a summary report.
-  - `DailyReportTime`: If the daily report is enabled, you can choose the time to receive the report. By default, the report is sent at 06:00.
-
 - `[Source_N]`:
 
   In the config files, we use the *source* sections to specify the cameras' configurations. A single processor can manage multiple cameras and all of them must be configured in the config file. You can generate this configuration in 3 different ways: directly in the config file, using the [UI](https://app.lanthorn.ai) or using the API.
   - `Id`: A string parameter to identify each camera. This value must be *unique*.
   - `Name`: A string parameter to name each camera. Although you can repeat the same name in multiple cameras, we recommend not doing that.
   - `VideoPath`: Sets the path or url required to get the camera's video stream.
  - `Tags`: List of tags (separated by commas). This field only has an informative purpose; changing this value doesn't affect the processor behavior.
-  - `NotifyEveryMinutes` and `ViolationThreshold`: Defines the *period of time* and *number of social distancing violations* desired to send notifications. For example, if you want to notify when *occurs more than 10 violations every 15 minutes*, you must set `NotifyEveryMinutes` in 15 and `ViolationThreshold` in 10.
-  - `Emails`: Defines the emails list to receive the notification. Multiple emails can be written separating them by commas.
-  - `EnableSlackNotifications`: A boolean parameter to enable/disable the Slack integration for notifications and daily reports. We recommend not editing this parameter directly and manage it from the [UI](https://app.lanthorn.ai) to configure your workspace correctly.
-  - `DailyReport`: When the parameter is set in *True*, the information of the previous day is sent in a summary report.
-  - `DailyReportTime`: If the daily report is enabled, you can choose the time to receive the report. By default, the report is sent at 06:00.
   - `DistMethod`: Configures the chosen distance method used by the processor to detect the violations. There are three different values: CalibratedDistance, CenterPointsDistance and FourCornerPointsDistance. If you want to use *CalibratedDistance* you will need to calibrate the camera from the [UI](https://app.lanthorn.ai).
   - `LiveFeedEnabled`: A boolean parameter that enables/disables the video live feed for the source.
 
@@ -666,18 +649,10 @@ All the configurations are grouped in *sections* and some of them can vary depen
   - `Endpoint`: Configures an endpoint url.
   - `Authorization`: Configures the Authorization header. For example: *Bearer <token>*.
   - `SendingInterval`: Configures the desired time interval (in seconds) to send data into the configured endpoint.
-
-- `[AreaLogger_N]`:
-
-  Similar to the section *SourceLogger_N* (for areas instead of cameras), we support multiple loggers (right now only 1, but we plan to include new ones in the future) that you enable/disable uncommenting/commenting them or with the *Enabled* flag.
-  - `file_system_logger`: Stores the occupancy data in a folder inside the processor.
-    - `LogDirectory`: Defines the location where the generated files will be stored.
 
 - `[PeriodicTask_N]`:
 
-  The processor also supports the execution of periodic tasks to generate reports, accumulate metrics, backup your files, etc. For now, we support the *metrics* and *s3_backup* tasks. You can enable/disable these functionalities uncommenting/commenting the section or with the *Enabled* flag.
-  - `metrics`: Generates different reports (hourly, daily and live) with information about the social distancing infractions, facemask usage and occupancy in your cameras and areas. You need to have it enabled to see data in the [UI](https://app.lanthorn.ai) dashboard or use the `/metrics` endpoints.
-    - `LiveInterval`: Expressed in minutes. Defines the time interval desired to generate live information.
+  The processor also supports the execution of periodic tasks. For now, we only support the *s3_backup* task. You can enable/disable this functionality uncommenting/commenting the section or with the *Enabled* flag.
   - `s3_backup`: Back up into an S3 bucket all the generated data (raw data and reports). To enable the functionality you need to configure the aws credentials following the steps explained in the section [Configuring AWS credentials](#configuring-aws-credentials).
    - `BackupInterval`: Expressed in minutes. Defines the time interval desired to back up the raw data.
    - `BackupS3Bucket`: Configures the S3 Bucket used to store the backups.
 
@@ -686,6 +661,7 @@ All the configurations are grouped in *sections* and some of them can vary depen
 #### Use different models per camera
 By default, all video streams are processed using the same ML model. When a processing thread starts running, it verifies if a configuration .json file exists in the path: /repo/data/processor/config/sources//ml_models/model_.json
+
 If no custom configuration is detected, a file will be generated using the default values from the `[Detector]` section, documented above. These JSONs contain the configuration of which ML Model is used for processing said stream, and can be modified either manually or using the endpoint `/ml_model` documented below. Please note that models that differ in their location or name regarding the `./download_` scripts must specify their location in the field `file_path`.
@@ -696,7 +672,6 @@ After you run the processor on your node, you can use the exposed API to control
 The available endpoints are grouped in the following subapis:
 - `/config`: provides a pair of endpoints to retrieve and overwrite the current configuration file.
 - `/cameras`: provides endpoints to execute all the CRUD operations required by cameras. These endpoints are very useful to edit the camera's configuration without restarting the docker process. Additionally, this subapi exposes the calibration endpoints.
-- `/areas`: provides endpoints to execute all the CRUD operations required by areas.
 - `/app`: provides endpoints to retrieve and update the `App` section in the configuration file.
 - `/api`: provides endpoints to retrieve the `API` section in the configuration file.
 - `/core`: provides endpoints to retrieve and update the `CORE` section in the configuration file.
@@ -705,11 +680,8 @@ The available endpoints are grouped in the following subapis:
 - `/tracker`: provides endpoints to retrieve and update the `Tracker` section in the configuration file.
 - `/source_post_processors`: provides endpoints to retrieve and update the `SourcePostProcessor_N` sections in the configuration file. You can use that endpoint to enable/disable a post processor step, change a parameter, etc.
 - `/source_loggers`: provides endpoints to retrieve and update the `SourceLoggers_N` sections in the configuration file. You can use that endpoint to enable/disable a logger, change a parameter, etc.
-- `/area_loggers`: provides endpoints to retrieve and update the `AreaLoggers_N` sections in the configuration file. You can use that endpoint to enable/disable a post processor step, change a parameter, etc.
 - `/periodict_tasks`: provides endpoints to retrieve and update the `PeriodicTask_N` sections in the configuration file. You can use that endpoint to enable/disable the metrics generation.
-- `/metrics`: a set of endpoints to retrieve the data generated by the metrics periodic task.
 - `/export`: an endpoint to export (in zip format) all the data generated by the processor.
-- `/slack`: a set of endpoints required to configure Slack correctly in the processor. We recommend to use these endpoints from the [UI](https://app.lanthorn.ai) instead of calling them directly.
 - `/auth`: a set of endpoints required to configure OAuth2 in the processors' endpoints.
 - `/ml_model`: an endpoint to edit the ML model and its parameters that are used to process a certain camera's video feed.
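The subapis above can be driven with any HTTP client. A short sketch under stated assumptions: the processor is reachable at localhost:8000 (host and port are deployment specific), OAuth2 is not enabled, and only the documented retrieve side of the `/config` pair is exercised (its overwrite counterpart accepts the same JSON document back):

```python
# Sketch of querying the processor API listed above; host/port are
# assumptions, adjust them to your deployment. If OAuth2 is configured
# via the /auth subapi, an Authorization header must be added.
import requests

PROCESSOR = "http://localhost:8000"  # assumption: default local deployment

response = requests.get(f"{PROCESSOR}/config")
response.raise_for_status()
config = response.json()

# The returned document mirrors the config file sections described above
# (app, api, core, cameras, detector, tracker, source loggers, ...).
print(sorted(config.keys()))
```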
@@ -722,23 +694,12 @@ The complete list of endpoints, with a short description and the signature speci ***NOTE*** Most of the endpoints update the config file given in the Dockerfile. If you don't have this file mounted (see section [Persisting changes](#persisting-changes)), these changes will be inside your container and will be lost after stopping it. -### Interacting with the processors' generated information - -#### Generated information -The generated information can be split into 3 categories: - - `Raw data`: This is the most basic level of information. It only includes the results of the detector, classifier, tracker, and any configured post-processor step. - - `Metrics data`: **Only written if you have enabled the metrics periodic task** (see [section](#change-the-default-configuration)). These include metrics related to occupancy, social-distancing, and facemask usage; aggregated by hour and day. - - `Notifications`: Situations that require an immediate response (such as surpassing the maximum occupancy threshold for an area) and need to be notified ASAP. The currently supported notification channels are email and slack. - -#### Accessing and storing the information -All of the information that is generated by the processor is stored (by default) inside the edge device for security reasons. However, the processor provides features to easily export or backup the data to another system if required. - ##### Storing the raw data -The raw data storage is managed by the `SourceLogger` and `AreaLogger` steps. By default, only the `video_logger` and the `file_system_logger` are enabled. As both steps store the data inside the processor (by default the folder `/repo/data/processor/static/`), we strongly recommend mounting that folder to keep the data safe when the process is restarted ([Persisting changes](#persisting-changes)). + +The raw data storage is managed by the `SourceLogger` step. By default, only the `video_logger` and the `file_system_logger` are enabled. As both steps store the data inside the processor (by default the folder `/repo/data/processor/static/`), we strongly recommend mounting that folder to keep the data safe when the process is restarted ([Persisting changes](#persisting-changes)). Moreover, we recommend keeping active these steps because the [frontend](https://app.lanthorn.ai) and the metrics need them. -If you need to store (or process) the raw data in *real-time* outside the processor, you can activate the `web_hook_logger` and implement an endpoint that handles these events. -The `web_hook_logger` step is configured to send an event (a PUT request) using the following format: +If you need to use the raw data to feed the lanthorn cloud dashboard, you need to activate the `web_hook_logger` and sync your processor with our servers. All the videos are processed inside the edge device, there is *no video information* sent to the dashboard cloud instance. The only information that the webhook logger sends is anonymous data following the format: ``` { @@ -752,17 +713,10 @@ The `web_hook_logger` step is configured to send an event (a PUT request) using } ``` -You only need to implement an endpoint that matches the previous signature; configure its URL in the config file and the integration will be done. We recommend this approach if you want to integrate "Smart social distancing" with another existing system with real-time data. 
+If you want to create your own dashboard with the processed data, you only need to implement an endpoint that matches the previous signature; configure its URL in the config file and the integration will be done. This approach is also useful if you want to integrate "Smart social distancing" with another existing system with real-time data.
 
 Another alternative is to activate the periodic task `s3_backup`. This task will back up all the generated data (raw data and metrics) inside the configured S3 bucket, according to the time interval defined by the `BackupInterval` parameter. Before enabling this feature remember to configure AWS following the steps defined in the section [Configuring AWS credentials](#configuring-aws-credentials).
 
-##### Accessing the metrics data
-The data of aggregated metrics is stored in a set of CSV files inside the device. For now, we don't have implemented any mechanism to store these files outside the processor (the `web_hook_logger` only sends "raw data" events).
-However, if you enable the `s3_backup` task, the previous day's metrics files will be backed up at AWS at the beginning of the day.
-
-You can easily visualize the metrics information in the dashboard exposed in the [frontend](https://app.lanthorn.ai).
-In addition, you can retrieve the same information through the API (see the metrics section in the API documentation exposed in http://:/docs#/Metrics).
-
 ##### Exporting the data
 In addition to the previous features, the processor exposes an endpoint to export in zip format all the generated data. The signature of this endpoint can be found in http://:/docs#/Export.

From 997ab0011b7505b71213c4b8bf7ab2f17ef91fa3 Mon Sep 17 00:00:00 2001
From: Pablo Grill
Date: Mon, 2 Aug 2021 10:53:39 -0300
Subject: [PATCH 6/7] Small fixes in the Readme.

---
 README.md | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/README.md b/README.md
index e7f5b500..c4d6828f 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
 - [Supported video feeds formats](#supported-video-feeds-formats)
 - [Change the default configuration](#change-the-default-configuration)
 - [API usage](#api-usage)
+  - [Interacting with the processors' generated information](#interacting-with-the-processors-generated-information)
 - [Issues and Contributing](#issues-and-contributing)
 - [Contact Us](#contact-us)
 - [License](#license)
@@ -694,6 +695,18 @@ The complete list of endpoints, with a short description and the signature speci
 
 ***NOTE*** Most of the endpoints update the config file given in the Dockerfile. If you don't have this file mounted (see section [Persisting changes](#persisting-changes)), these changes will be inside your container and will be lost after stopping it.
 
+
+### Interacting with the processors' generated information
+
+#### Generated information
+The generated information can be split into 3 categories:
+ - `Raw data`: This is the most basic level of information. It only includes the results of the detector, classifier, tracker, and any configured post-processor step. All the video information generated (such as the live feed) is **stored inside the edge device and is never sent outside it**.
+ - `Metrics data`: **Generated in Lanthorn's cloud dashboard, and only if you register a processor in your account.** These include metrics related to occupancy, social-distancing, and facemask usage; aggregated by hour and day. When you enable that functionality, some raw data needs to be sent from your device to our cloud engine.
However, **only numeric and statistical data is sent. No video or personal information is sent from your device to Lanthorn's servers.**
+ - `Notifications`: Situations that require an immediate response (such as surpassing the maximum occupancy threshold for an area) and need to be notified ASAP. The currently supported notification channels are email and Slack. Like the metrics, this feature is supported by Lanthorn's cloud engine.
+
+#### Accessing and storing the information
+Most of the information that is generated by the processor is stored (by default) inside the edge device for security reasons. However, the processor provides features to easily export or backup the data to another system if required.
+
 ##### Storing the raw data
 
 The raw data storage is managed by the `SourceLogger` step. By default, only the `video_logger` and the `file_system_logger` are enabled. As both steps store the data inside the processor (by default the folder `/repo/data/processor/static/`), we strongly recommend mounting that folder to keep the data safe when the process is restarted ([Persisting changes](#persisting-changes)).

From e0f7542411cb5b73722d656cceb7569312472ae8 Mon Sep 17 00:00:00 2001
From: Pablo Grill
Date: Wed, 11 Aug 2021 11:54:27 -0300
Subject: [PATCH 7/7] Fixing tests. Rebasing with master.

---
 api/models/config.py         | 8 ++++++++
 api/tests/app/test_camera.py | 2 --
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/api/models/config.py b/api/models/config.py
index b1e22f99..db98886e 100644
--- a/api/models/config.py
+++ b/api/models/config.py
@@ -29,10 +29,18 @@ class ConfigDTO(SnakeModel):
     periodicTasks: Optional[List[PeriodicTaskDTO]] = []
 
 
+class ConfigMetrics(BaseModel):
+    social_distancing: bool
+    facemask: bool
+    occupancy: bool
+    in_out: bool
+
+
 class ConfigInfo(BaseModel):
     version: str
     device: str
     has_been_configured: bool
+    metrics: ConfigMetrics
 
     class Config:
         schema_extra = {
diff --git a/api/tests/app/test_camera.py b/api/tests/app/test_camera.py
index e5d95768..ef889682 100644
--- a/api/tests/app/test_camera.py
+++ b/api/tests/app/test_camera.py
@@ -171,8 +171,6 @@ def test_edit_a_camera_properly(self, config_rollback, camera_sample, rollback_c
     camera_id = camera_sample["id"]
 
     body = {
-            "violation_threshold": 22,
-            "notify_every_minutes": 22,
             "id": camera_id,
             "name": "new_Kitchen",
             "video_path": "/repo/api/tests/data/mocked_data/data/softbio_vid.mp4",
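For reference, a standalone sketch of the `ConfigMetrics`/`ConfigInfo` models added in this last patch; the instantiation values below are hypothetical, just to show the shape the config-info response now reports:

```python
# Standalone replica of the models added to api/models/config.py in this
# patch; the example values are made up for illustration.
from pydantic import BaseModel


class ConfigMetrics(BaseModel):
    social_distancing: bool
    facemask: bool
    occupancy: bool
    in_out: bool


class ConfigInfo(BaseModel):
    version: str
    device: str
    has_been_configured: bool
    metrics: ConfigMetrics


info = ConfigInfo(
    version="1.0",
    device="x86",
    has_been_configured=True,
    metrics=ConfigMetrics(
        social_distancing=True, facemask=True, occupancy=False, in_out=False
    ),
)
print(info.json(indent=2))
```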