Linting integration (#8)
* algorithm linting

* PyQt5 import-error still there but others kinda okay

* pylint changes so that no exit code 30 hopefully

* requirements file

* remove pywin32 to see if lint job runs

* more linting - can merge and do style sheets

---------

Co-authored-by: Skaiste Motiejunaite <[email protected]>
SkaisteMot and SkaisteMotiejunaite authored Jan 9, 2025
1 parent c4c0ada commit 9d24949
Showing 12 changed files with 96 additions and 61 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/pylint.yml
@@ -37,6 +37,6 @@ jobs:
         pip install pylint # Ensuring pylint is explicitly installed
     # Run pylint on Python files
-    - name: Analyzing the code with pylint
+    - name: Analysing the code with pylint
       run: |
-        pylint $(git ls-files '*.py')
+        pylint $(git ls-files '*.py') --output=lint.txt || true
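The `|| true` makes the lint step exit 0 even when pylint finds problems, which is what the commit message's "no exit code 30" bullet refers to: pylint's exit status is a bit mask (1 fatal, 2 error, 4 warning, 8 refactor, 16 convention, 32 usage error), so 30 = 2+4+8+16 means error, warning, refactor and convention messages were all emitted. A minimal local sketch that decodes the status; the target path at the bottom is a stand-in:

import subprocess

# pylint's documented exit-status bits. "Exit code 30" from the commit
# message decodes to error + warning + refactor + convention messages.
FLAGS = {1: "fatal", 2: "error", 4: "warning", 8: "refactor",
         16: "convention", 32: "usage error"}

def lint(paths):
    """Run pylint on the given paths and report which bit flags were set."""
    proc = subprocess.run(["pylint", *paths], capture_output=True, text=True)
    raised = [name for bit, name in FLAGS.items() if proc.returncode & bit]
    print(proc.stdout)
    print("pylint raised:", ", ".join(raised) or "nothing")

lint(["App/main.py"])  # hypothetical invocation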
10 changes: 6 additions & 4 deletions .pylintrc
@@ -1,4 +1,6 @@
-[MAIN]
+[MAIN]
+# To get rid of App\main.py:3:0: E0401: Unable to import 'PyQt5.QtWidgets' (import-error)
+init-hook=import sys; sys.path.append(r'C:\Users\skais\ThesisProject\DevCode\.venv\Lib\site-packages')
 
 # Analyse import fallback blocks. This can be used to support both Python 2 and
 # 3 compatible code, which means that the block might have code that exists
@@ -25,13 +27,13 @@ clear-cache-post-run=no
 # A comma-separated list of package or module names from where C extensions may
 # be loaded. Extensions are loading into the active Python interpreter and may
 # run arbitrary code.
-extension-pkg-allow-list=cv2
+extension-pkg-allow-list=cv2,fer,PyQt5
 
 # A comma-separated list of package or module names from where C extensions may
 # be loaded. Extensions are loading into the active Python interpreter and may
 # run arbitrary code. (This is an alternative name to extension-pkg-allow-list
 # for backward compatibility.)
-extension-pkg-whitelist=cv2
+extension-pkg-whitelist=cv2,fer,PyQt5
 
 # Return non-zero exit code if any of these messages/categories are detected,
 # even if score is above --fail-under value. Syntax same as enable. Messages
@@ -571,7 +573,7 @@ contextmanager-decorators=contextlib.contextmanager
 # List of members which are set dynamically and missed by pylint inference
 # system, and so shouldn't trigger E1101 when accessed. Python regular
 # expressions are accepted.
-generated-members=
+generated-members=cv2.*
 
 # Tells whether to warn about missing members when the owner of the attribute
 # is inferred to be None.
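For context on the cv2/fer/PyQt5 entries: pylint cannot statically inspect compiled C extensions, so attribute access on them produces no-member noise unless the package is allow-listed or matched by generated-members. A small illustration, with an assumed input file name:

# Without extension-pkg-allow-list=cv2 and generated-members=cv2.*, pylint
# reports E1101 (no-member) on ordinary calls like these, because cv2 is a
# compiled extension whose members pylint cannot infer statically.
import cv2

img = cv2.imread("frame.png")                   # hypothetical input file
if img is not None:
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # E1101 without the allow-list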
9 changes: 7 additions & 2 deletions Algorithms/Body/emotion_recognition.py
@@ -1,15 +1,19 @@
 """Emotion Recogntion, display relevent emoji in seperate window"""
+from dataclasses import dataclass
+from typing import Optional
 import cv2
 import numpy as np
 from fer import FER
-from dataclasses import dataclass
-from typing import Optional
 
 @dataclass
 class EmotionResult:
     main_frame: np.ndarray
     emoji: np.ndarray
 
 class EmotionRecognizer:
+    """
+    Emotion recognizer class called by the ui
+    """
     def __init__(self, emoji_paths):
         self.emoji_paths = emoji_paths
         self.emoji_icons = self._load_emojis()
@@ -34,6 +38,7 @@ def _get_emotion_emoji(self, emotion):
         return self.emoji_icons.get(emotion, self.blank_image)
 
     def process_frame(self) -> Optional[EmotionResult]:
+        """Process the input and output the relevant emoji"""
        ret, frame = self.cap.read()
        if not ret:
            return None
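A minimal usage sketch of the class outside Qt (the emoji path mapping is assumed; in the app, FacialExpressionRecognitionPage drives process_frame from a QTimer instead of a loop):

import cv2

from Algorithms.Body.emotion_recognition import EmotionRecognizer

emoji_paths = {"happy": "Icons/happy.png"}   # hypothetical mapping
recognizer = EmotionRecognizer(emoji_paths)
while True:
    result = recognizer.process_frame()      # None when the camera read fails
    if result is None:
        break
    cv2.imshow("feed", result.main_frame)    # EmotionResult fields are ndarrays
    cv2.imshow("emoji", result.emoji)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
recognizer.release()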
13 changes: 8 additions & 5 deletions Algorithms/Body/hand_gesture_test.py
@@ -1,11 +1,11 @@
-# hand_gestures_test.py
+"""Hand Gesture Recogniziton using Googles MediaPipe, followed the documentation provided"""
+from dataclasses import dataclass
+from typing import Optional
 import cv2
 import mediapipe as mp
 import numpy as np
 from mediapipe.tasks import python
 from mediapipe.tasks.python import vision
-from dataclasses import dataclass
-from typing import Optional, Tuple
 
 @dataclass
 class GestureResult:
@@ -14,6 +14,7 @@ class GestureResult:
     right_emoji: np.ndarray
 
 class GestureRecognizer:
+    """Gesture Recognizer called by UI"""
     def __init__(self,icon_paths):
         self.icon_paths=icon_paths
         self.gesture_icons = self._load_icons()
@@ -50,11 +51,13 @@ def _get_gesture_emojis(self, gestures_and_landmarks):
         return emoji_images[0], emoji_images[1]
 
     def _setup_recognizer(self):
-        base_options = python.BaseOptions(model_asset_path='Algorithms/Body/gesture_recognizer.task')
+        base_options = python.BaseOptions(model_asset_path=
+            'Algorithms/Body/gesture_recognizer.task')
         options = vision.GestureRecognizerOptions(base_options=base_options, num_hands=2)
         return vision.GestureRecognizer.create_from_options(options)
 
     def process_frame(self) -> Optional[GestureResult]:
+        """Process input and return relevent hand emojis"""
         ret, frame = self.cap.read()
         if not ret:
             return None
@@ -104,4 +107,4 @@ def _get_gesture_emojis(self, gestures_and_landmarks):
 
     def release(self):
         self.cap.release()
-        cv2.destroyAllWindows()
\ No newline at end of file
+        cv2.destroyAllWindows()
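The reflowed _setup_recognizer wraps MediaPipe's Tasks API; the same three calls as a standalone sketch, using the model path from the diff:

from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# Load the bundled .task model and build a two-hand gesture recognizer,
# exactly as _setup_recognizer above does.
base_options = python.BaseOptions(
    model_asset_path="Algorithms/Body/gesture_recognizer.task")
options = vision.GestureRecognizerOptions(base_options=base_options,
                                          num_hands=2)
recognizer = vision.GestureRecognizer.create_from_options(options)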
10 changes: 9 additions & 1 deletion Algorithms/Objects/colour_detection.py
@@ -9,10 +9,12 @@
 import numpy as np
 
 class ColourRecognizer:
+    """Colour Recognizer called by UI"""
     def __init__(self, colour_ranges_csv):
         self.colour_ranges = self.load_colour_ranges(colour_ranges_csv)
 
     def load_colour_ranges(self, csv_file):
+        """load predefined hsv colour range from csv file"""
         colour_data = pd.read_csv(csv_file)
         colours = {}
         for _, row in colour_data.iterrows():
@@ -23,14 +25,17 @@ def load_colour_ranges(self, csv_file):
         return colours
 
     def create_mask(self, hsv_img, lower, upper):
+        """mask to only show colours in predefined ranges"""
         return cv2.inRange(hsv_img, lower, upper)
 
     def draw_bounding_box(self, image, contour, colour):
+        """draw boxes around detected colours"""
         x, y, w, h = cv2.boundingRect(contour)
         cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 0), 2, lineType=cv2.LINE_AA)
         cv2.putText(image, colour, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
 
     def count_colours(self, image, contours, colour, min_area):
+        """count borders to check the amount for each colour"""
         colour_count = 0
         for contour in contours:
             area = cv2.contourArea(contour)
@@ -40,11 +45,14 @@ def count_colours(self, image, contours, colour, min_area):
         return colour_count
 
     def display_colour_counts(self, image, colour_counts):
+        """display the colour and its count """
         y_offset = 30
         for i, (colour, count) in enumerate(colour_counts.items()):
-            cv2.putText(image, f"{colour}: {count}", (10, y_offset + i * 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
+            cv2.putText(image, f"{colour}: {count}", (10, y_offset + i * 20),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
 
     def detect_and_draw(self, image, min_area=300):
+        """Main can change min area depending on scene"""
         hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
         colour_counts = {}
         for colour, (lower, upper) in self.colour_ranges.items():
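The class's mask-and-contour pipeline, reduced to a standalone sketch with an assumed image file and an assumed red HSV range (the real ranges come from the CSV):

import cv2
import numpy as np

# Convert to HSV, mask one colour range, find contours, and box anything
# above the class's default min_area of 300 pixels.
img = cv2.imread("scene.png")                      # hypothetical input image
if img is not None:
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([0, 120, 70])                 # assumed red range
    upper = np.array([10, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 300:
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)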
3 changes: 2 additions & 1 deletion Algorithms/Objects/object_detection.py
@@ -7,12 +7,13 @@
 https://medium.com/softplus-publication/video-object-tracking-with-yolov8-and-sort-library-e28444b189aa
 """
 from ultralytics import YOLO
-import cv2
 
 class ObjectRecognizer:
+    """Object Recognizer called from UI"""
     def __init__(self, model_path):
         self.model = YOLO(model_path)
 
     def detect_and_draw(self, frame):
+        """input into model"""
         results = self.model.track(frame, persist=True)
         return results[0].plot()
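detect_and_draw is a thin wrapper over ultralytics tracking; here are the same two calls as a standalone sketch, with assumed YOLOv8 weights and the default webcam:

import cv2
from ultralytics import YOLO

model = YOLO("yolov8n.pt")                      # assumed weights file
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    results = model.track(frame, persist=True)  # persist keeps track IDs across calls
    annotated = results[0].plot()               # BGR frame with boxes and IDs drawn
    cv2.imwrite("annotated.png", annotated)
cap.release()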
1 change: 1 addition & 0 deletions App/main.py
@@ -1,3 +1,4 @@
+"""main file to start desktop application"""
 import sys
 from PyQt5.QtWidgets import QApplication
 from pages.home_page import HomePage
17 changes: 10 additions & 7 deletions App/pages/facial_expression_page.py
@@ -1,17 +1,16 @@
+"""Facial expression page opened from home_Page.ui"""
+import sys
+import os
+import cv2
 from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QHBoxLayout
 from PyQt5.QtGui import QPixmap, QImage
 from PyQt5.QtCore import QTimer, Qt
 
-import sys
-import os
-
 # Add the DevCode directory to the Python path
 sys.path.append(os.path.abspath("C:\\Users\\skais\\ThesisProject\\DevCode"))
 from Algorithms.Body.emotion_recognition import EmotionRecognizer
 
-import cv2
-
 class FacialExpressionRecognitionPage(QWidget):
+    """Emotion Recognition"""
     def __init__(self):
         super().__init__()
         emoji_paths = {
@@ -32,6 +31,7 @@ def __init__(self):
         self.timer.start(30)
 
     def setup_ui(self):
+        """setup face expression page ui"""
         self.setWindowTitle("Facial Expression Recognition")
         self.setGeometry(100, 100, 800, 600)
 
@@ -61,12 +61,14 @@ def setup_ui(self):
         self.setLayout(main_layout)
 
     def update_frame(self):
+        """update frames from video stream and detected emotion"""
         result = self.expression_recognizer.process_frame()
         if result:
             self.video_feed.setPixmap(self._convert_cv_to_qt(result.main_frame))
             self.face_emoji.setPixmap(self._convert_cv_to_qt(result.emoji))
 
     def _convert_cv_to_qt(self, cv_img):
+        """convert cv2 img to qpixmap for display in qlabel"""
         if cv_img is None:
             return QPixmap()
         if len(cv_img.shape) == 2:  # Grayscale
@@ -78,6 +80,7 @@ def _convert_cv_to_qt(self, cv_img):
             qt_image = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888)
         return QPixmap.fromImage(qt_image)
 
-    def closeEvent(self, event):
+    def close_event(self, event):
+        """handle close event to release resources"""
         self.expression_recognizer.release()
         event.accept()
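The BGR-to-QPixmap conversion the pages share, extracted as a sketch (colour path only; the page method also handles grayscale and None input, and a QApplication must exist before QPixmap is used):

import cv2
from PyQt5.QtGui import QImage, QPixmap

def cv_to_qpixmap(cv_img):
    """Convert a BGR OpenCV image to a QPixmap for display in a QLabel."""
    rgb = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)       # Qt expects RGB order
    h, w, ch = rgb.shape
    qimg = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)
    return QPixmap.fromImage(qimg)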
19 changes: 11 additions & 8 deletions App/pages/general_page.py
@@ -1,17 +1,18 @@
-from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QHBoxLayout
-from PyQt5.QtCore import Qt
-from PyQt5.QtGui import QPixmap, QImage
+"""Page that is used when only a stream is outputted, object detection, colour detection etc"""
+# Add the DevCode directory to the Python path
 import sys
 import os
 import cv2
+from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QHBoxLayout
+from PyQt5.QtCore import Qt
+from PyQt5.QtGui import QPixmap, QImage
 
 # Add the DevCode directory to the Python path
 sys.path.append(os.path.abspath("C:\\Users\\skais\\ThesisProject\\DevCode"))
 from Algorithms.Objects.colour_detection import ColourRecognizer
 from Algorithms.Objects.object_detection import ObjectRecognizer
 
 
 class GeneralDemoPage(QWidget):
+    """page used for general display of streams"""
     def __init__(self, title: str, description: str, algorithm: str):
         super().__init__()
         self.setWindowTitle(title)
@@ -65,7 +66,8 @@ def __init__(self, title: str, description: str, algorithm: str):
         self.failed_frames = 0  # Counter for consecutive failed frames
         self.timer = self.startTimer(20)
 
-    def timerEvent(self, event):
+    def timer_event(self):
+        """handle timer event for capturing and processing frames"""
         ret, frame = self.cap.read()
         if not ret:
             self.failed_frames += 1
@@ -86,15 +88,16 @@ def timerEvent(self, event):
         processed_frame = self.recognizer.detect_and_draw(frame)
 
         # Convert processed frame to QImage
-        height, width, channel = processed_frame.shape
+        height, width = processed_frame.shape
         bytes_per_line = 3 * width
         qimg = QImage(processed_frame.data, width, height, bytes_per_line, QImage.Format_BGR888)
 
         # Update the video frame (using QLabel's setPixmap)
         pixmap = QPixmap.fromImage(qimg)
         self.video_label.setPixmap(pixmap)
 
-    def closeEvent(self, event):
+    def close_event(self, event):
+        """handle close event to release resources"""
         # Release the video capture and stop the algorithm
         if hasattr(self, 'cap') and self.cap.isOpened():
             self.cap.release()
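For reference, the built-in Qt timer mechanism this page builds on, as a self-contained sketch (PyQt5 API: startTimer schedules ticks, and Qt dispatches each tick to a method named timerEvent):

import sys
from PyQt5.QtWidgets import QApplication, QWidget

class TickWidget(QWidget):
    """Minimal widget showing the startTimer/timerEvent pattern."""
    def __init__(self):
        super().__init__()
        self.timer_id = self.startTimer(20)   # ~50 ticks per second

    def timerEvent(self, event):              # called by Qt for each tick
        if event.timerId() == self.timer_id:
            print("tick")

app = QApplication(sys.argv)
w = TickWidget()
w.show()
sys.exit(app.exec_())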
31 changes: 10 additions & 21 deletions App/pages/hand_gesture_page.py
@@ -1,18 +1,16 @@
-# hand_gesture_page.py
+"""page for hand gesture detection, opened from home_page.py button"""
+import sys
+import os
+import cv2
 from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QHBoxLayout
 from PyQt5.QtGui import QPixmap, QImage
 from PyQt5.QtCore import QTimer, Qt
 
-import sys
-import os
-
 # Add the DevCode directory to the Python path
 sys.path.append(os.path.abspath("C:\\Users\\skais\\ThesisProject\\DevCode"))
 from Algorithms.Body.hand_gesture_test import GestureRecognizer
 
-import cv2
-
 class HandGestureRecognitionPage(QWidget):
+    """Hand gesture recognizer"""
     def __init__(self):
         super().__init__()
         icon_paths = {
@@ -32,6 +30,7 @@ def __init__(self):
         self.timer.start(30)
 
     def setup_ui(self):
+        """UI for hand page setup"""
         self.setWindowTitle("Hand Gesture Recognition")
         self.setGeometry(100, 100, 1200, 600)
 
@@ -70,13 +69,15 @@ def setup_ui(self):
         self.setLayout(main_layout)
 
     def update_frame(self):
+        """update the video feed and hand gesture icons"""
         result = self.gesture_recognizer.process_frame()
         if result:
             self.video_feed.setPixmap(self._convert_cv_to_qt(result.main_frame))
             self.left_emoji.setPixmap(self._convert_cv_to_qt(result.left_emoji))
             self.right_emoji.setPixmap(self._convert_cv_to_qt(result.right_emoji))
 
     def _convert_cv_to_qt(self, cv_img):
+        """convert cv2 img to qpixmap for display in qlabel"""
         if cv_img is None:
             return QPixmap()
         if len(cv_img.shape) == 2:  # Grayscale
@@ -89,19 +90,7 @@ def _convert_cv_to_qt(self, cv_img):
         return QPixmap.fromImage(qt_image)
 
 
-    def closeEvent(self, event):
+    def close_event(self, event):
+        """handle close event to release resources"""
         self.gesture_recognizer.release()
         event.accept()
-"""
-def main():
-    from PyQt5.QtWidgets import QApplication
-    import sys
-    app = QApplication(sys.argv)
-    window = HandGestureRecognitionPage()
-    window.show()
-    sys.exit(app.exec_())
-if __name__ == '__main__':
-    main()
-"""
(Diffs for the remaining two changed files were not loaded in this view.)
