Merge branch 'main' into patch-1
tanishaness authored Nov 4, 2024
2 parents 00d5891 + 2bf2f72 commit 782b4cd
Showing 10 changed files with 1,461 additions and 91 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/issue_open_close.yml
@@ -3,7 +3,7 @@ name: Issue Auto Comment
 # Created by @smog-root
 
 on:
-  issues:
+  issues:write
   types: [opened, closed]
 
 jobs:
@@ -25,4 +25,4 @@ jobs:
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
        issue_number: ${{ github.event.issue.number }}
-        body: " ✅ This issue has been successfully closed. Thank you for your contribution and helping us improve the project! If you have any more ideas or run into other issues, feel free to open a new one. Happy coding! 🚀"
+        body: " ✅ This issue has been successfully closed. Thank you for your contribution and helping us improve the project! If you have any more ideas or run into other issues, feel free to open a new one. Happy coding! 🚀"
46 changes: 25 additions & 21 deletions Backend/object_detection.py
@@ -2,6 +2,7 @@
 import numpy as np
 import tensorflow as tf
 import tensorflow_hub as hub
+import json
 
 # Load the SSD MobileNet V2 model from TensorFlow Hub
 def load_model():
@@ -13,35 +14,34 @@ def load_model():
         print("Error loading model:", e)
         return None
 
+# Load class names from an external file
+def load_class_names(filename="class_names.json"):
+    with open(filename) as f:
+        return json.load(f)
+
 # Function to perform object detection on an image tensor
 def detect_objects(model, image):
     # Resize and prepare image tensor
-    image_resized = cv2.resize(image, (320, 320))
+    h, w = image.shape[:2]
+    scale_factor = 320 / max(h, w)
+    image_resized = cv2.resize(image, (int(w * scale_factor), int(h * scale_factor)))
     input_tensor = tf.convert_to_tensor(image_resized, dtype=tf.uint8)
     input_tensor = input_tensor[tf.newaxis, ...]
 
     # Run inference
     return model(input_tensor)
 
-# Function to map detection class IDs to class names
-def get_class_name(class_id, class_names):
-    return class_names.get(class_id, "Unknown")
-
 # Draw bounding boxes and labels on the image
 def draw_boxes(image, boxes, class_ids, scores, class_names, threshold=0.5):
-    height, width, _ = image.shape
+    h, w, _ = image.shape
     detected_items = []
 
     for i in range(len(scores)):
         if scores[i] >= threshold:
             box = boxes[i]
-            ymin, xmin, ymax, xmax = box
-            xmin = int(xmin * width)
-            xmax = int(xmax * width)
-            ymin = int(ymin * height)
-            ymax = int(ymax * height)
+            ymin, xmin, ymax, xmax = [int(val * dim) for val, dim in zip(box, [h, w, h, w])]
 
-            class_name = get_class_name(class_ids[i], class_names)
+            class_name = class_names.get(str(class_ids[i]), "Unknown")
             confidence = scores[i] * 100
 
             # Draw bounding box and label
@@ -53,8 +53,17 @@ def draw_boxes(image, boxes, class_ids, scores, class_names, threshold=0.5):
     print("Detected items:", ", ".join(set(detected_items)))
     return image
 
+# Load and preprocess the image
+def load_image(image_path):
+    image = cv2.imread(image_path)
+    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if image is not None else None
+
 # Main function for object detection
-def main(image_path, model, class_names, threshold=0.5, save_output=False):
+def main(image_path, class_names_file="class_names.json", threshold=0.5, save_output=False):
+    # Load model and class names
+    model = load_model()
+    class_names = load_class_names(class_names_file)
+
     # Load image
     image = load_image(image_path)
     if image is None:
@@ -81,10 +90,5 @@ def main(image_path, class_names, threshold=0.5, save_output=False):
 
 # Run detection with threshold and save option
 if __name__ == "__main__":
-    class_names = {
-        1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 64: 'laptop', 67: 'cell phone'
-    }
-    model = load_model()
-    if model:
-        image_path = "test-image2.jpg"  # Adjust image path as needed
-        main(image_path, model, class_names, threshold=0.5, save_output=True)
+    image_path = "test-image2.jpg"  # Adjust image path as needed
+    main(image_path, threshold=0.5, save_output=True)
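
A note on this change: main() no longer receives a model and a label dict; it loads the SSD MobileNet V2 model itself and reads class names from class_names.json, which is not part of this diff. Because json.load() returns objects keyed by strings, draw_boxes() now looks labels up with class_names.get(str(class_ids[i]), "Unknown"). The sketch below is one assumed way to produce a compatible label file and call the new entry point; the IDs are taken from the dict removed above, while the file name and the Backend/ working directory are assumptions.

import json
from object_detection import main  # assumes this runs from the Backend/ directory

# Hypothetical subset of the COCO label map; JSON object keys are strings,
# matching the new lookup class_names.get(str(class_ids[i]), "Unknown").
labels = {
    "1": "person", "2": "bicycle", "3": "car",
    "4": "motorcycle", "64": "laptop", "67": "cell phone",
}
with open("class_names.json", "w") as f:
    json.dump(labels, f, indent=2)

# The refactored main() now loads the model and the label map itself.
main("test-image2.jpg", class_names_file="class_names.json", threshold=0.5, save_output=True)
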
110 changes: 79 additions & 31 deletions Backend/peer_comparison_tool.py
@@ -1,76 +1,117 @@
 import numpy as np
 import matplotlib.pyplot as plt
+import pandas as pd
 
+# Define a Student class to initialize student data and track changes over time
 class Student:
     def __init__(self, name, study_hours, participation_rate, assignment_completion):
         self.name = name
-        self.study_hours = study_hours
-        self.participation_rate = participation_rate
-        self.assignment_completion = assignment_completion
+        self.study_hours = [study_hours]
+        self.participation_rate = [participation_rate]
+        self.assignment_completion = [assignment_completion]
 
+    # Method to update metrics for longitudinal tracking
+    def update_metrics(self, study_hours, participation_rate, assignment_completion):
+        self.study_hours.append(study_hours)
+        self.participation_rate.append(participation_rate)
+        self.assignment_completion.append(assignment_completion)
+
+    # Calculate average metrics for the student across all timepoints
+    def average_metrics(self):
+        return {
+            "average_study_hours": np.mean(self.study_hours),
+            "average_participation_rate": np.mean(self.participation_rate),
+            "average_assignment_completion": np.mean(self.assignment_completion)
+        }
+
+# Calculate overall averages for the latest metrics among all students
 def calculate_averages(students):
-    study_hours = np.array([s.study_hours for s in students])
-    participation_rates = np.array([s.participation_rate for s in students])
-    assignment_completions = np.array([s.assignment_completion for s in students])
+    study_hours = np.array([s.study_hours[-1] for s in students])
+    participation_rates = np.array([s.participation_rate[-1] for s in students])
+    assignment_completions = np.array([s.assignment_completion[-1] for s in students])
 
     return {
         "average_study_hours": np.mean(study_hours),
         "average_participation_rate": np.mean(participation_rates),
         "average_assignment_completion": np.mean(assignment_completions)
     }
 
-def identify_outliers(students, averages):
+# Identify outliers based on a customizable threshold (default is 1.5)
+def identify_outliers(students, averages, threshold=1.5):
     outliers = []
-    threshold = 1.5
 
     for student in students:
-        if (abs(student.study_hours - averages["average_study_hours"]) > threshold * np.std([s.study_hours for s in students])):
+        # Detect outliers in each metric based on standard deviation
+        if (abs(student.study_hours[-1] - averages["average_study_hours"]) > threshold * np.std([s.study_hours[-1] for s in students])):
            outliers.append((student.name, 'Study Hours'))
-        if (abs(student.participation_rate - averages["average_participation_rate"]) > threshold * np.std([s.participation_rate for s in students])):
+        if (abs(student.participation_rate[-1] - averages["average_participation_rate"]) > threshold * np.std([s.participation_rate[-1] for s in students])):
            outliers.append((student.name, 'Participation Rate'))
-        if (abs(student.assignment_completion - averages["average_assignment_completion"]) > threshold * np.std([s.assignment_completion for s in students])):
+        if (abs(student.assignment_completion[-1] - averages["average_assignment_completion"]) > threshold * np.std([s.assignment_completion[-1] for s in students])):
            outliers.append((student.name, 'Assignment Completion'))
 
     return outliers
 
+# Calculate Peer Comparison Index (PCI) as the average of individual average metrics
+def calculate_peer_comparsion_index(students):
+    scores = []
+    for student in students:
+        avg_metrics = student.average_metrics()
+        score = (avg_metrics["average_study_hours"] + avg_metrics["average_participation_rate"] + avg_metrics["average_assignment_completion"]) / 3
+        scores.append((student.name, score))
+    return scores
+
+# Visualize student metrics with averages and outliers highlighted
 def visualize_results(students, averages, outliers):
     behaviors = ['Study Hours', 'Participation Rate', 'Assignment Completion']
 
-    # Create a figure
+    # Set up figure for the bar chart
     plt.figure(figsize=(12, 6))
+    bar_width = 0.2  # Width for each student's bars
+    x_indices = np.arange(len(behaviors))  # X positions for bars
+    avg_colors = ['red', 'blue', 'green']  # Colors for average lines
 
-    # Set the width of each bar
-    bar_width = 0.2
-
-    # X positions for each behavior
-    x_indices = np.arange(len(behaviors))
-
-    # Define colors for average lines
-    avg_colors = ['red', 'blue', 'green']
-
-    # Create bars for each student
+    # Create bar chart for each student's latest metrics
     for i, student in enumerate(students):
         plt.bar(x_indices + (i * bar_width),
-                [student.study_hours, student.participation_rate, student.assignment_completion],
+                [student.study_hours[-1], student.participation_rate[-1], student.assignment_completion[-1]],
                 width=bar_width, label=student.name)
 
-    # Add dotted lines for averages with different colors
+    # Add lines for average values in each metric category
     for i, (avg, color) in enumerate(zip(
         [averages['average_study_hours'], averages['average_participation_rate'], averages['average_assignment_completion']],
         avg_colors)):
         plt.axhline(y=avg, color=color, linestyle='--', label=f'Average {behaviors[i]}')
-
-    # Set the labels, title, and ticks
     plt.title('Student Behaviors and Averages')
     plt.xlabel('Behaviors')
     plt.ylabel('Values')
     plt.xticks(x_indices + bar_width * (len(students) - 1) / 2, behaviors)
     plt.legend()
-
-    # Show the plot
     plt.tight_layout()
     plt.show()
 
+# Generate a report in CSV format containing average metrics and outlier status
+def generate_report(students, averages, outliers):
+    report_data = {
+        "Name": [],
+        "Average Study Hours": [],
+        "Average Participation Rate": [],
+        "Average Assignment Completion": [],
+        "Outlier Status": []
+    }
+
+    for student in students:
+        avg_metrics = student.average_metrics()
+        report_data["Name"].append(student.name)
+        report_data["Average Study Hours"].append(avg_metrics["average_study_hours"])
+        report_data["Average Participation Rate"].append(avg_metrics["average_participation_rate"])
+        report_data["Average Assignment Completion"].append(avg_metrics["average_assignment_completion"])
+        report_data["Outlier Status"].append("Outlier" if any(name == student.name for name, _ in outliers) else "Normal")
+
+    df = pd.DataFrame(report_data)
+    df.to_csv('student_report.csv', index=False)
+    print("\nReport generated: student_report.csv")
+
+# Helper function to handle user input with a default value option
 def get_float_input(prompt, default_value):
     while True:
         try:
@@ -81,10 +122,11 @@ def get_float_input(prompt, default_value):
         except ValueError:
             print("Invalid input. Please enter a valid number.")
 
+# Main function to initialize student data, calculate metrics, identify outliers, and generate reports
 def main():
     students = []
 
-    # User input for student data
+    # Collect user input for each student
     while True:
         name = input("Enter student name (or 'done' to finish): ")
         if name.lower() == 'done':
@@ -107,11 +149,17 @@ def main():
         print("\nOutliers:")
         for name, behavior in outliers:
             print(f"{name} is an outlier in {behavior}")
-
-        # Visualize results
+
+        peer_comparsion_scores = calculate_peer_comparsion_index(students)
+        print("\nPeer Comparison Index (PCI):")
+        for name, score in peer_comparsion_scores:
+            print(f"{name}: {score:.2f}")
+
+        generate_report(students, averages, outliers)
         visualize_results(students, averages, outliers)
     else:
         print("No student data entered.")
 
+# Run the main function
 if __name__ == "__main__":
     main()
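
A note on this change: each metric is now stored per student as a list, update_metrics() appends later readings, calculate_averages() and identify_outliers() use only the latest value ([-1]), and calculate_peer_comparsion_index() averages each student's per-metric means. A minimal non-interactive sketch of that flow, assuming the module is importable as peer_comparison_tool and using made-up sample numbers:

from peer_comparison_tool import (
    Student, calculate_averages, identify_outliers,
    calculate_peer_comparsion_index, generate_report,
)

# Metrics are stored as lists, so later readings can be appended.
alice = Student("Alice", study_hours=5, participation_rate=80, assignment_completion=70)
alice.update_metrics(study_hours=6, participation_rate=90, assignment_completion=75)
bob = Student("Bob", study_hours=2, participation_rate=40, assignment_completion=50)
students = [alice, bob]

# Averages and outlier checks use only the latest reading of each metric.
averages = calculate_averages(students)
outliers = identify_outliers(students, averages, threshold=1.5)

# PCI averages each student's per-metric means, e.g. for Alice:
# (5.5 + 85 + 72.5) / 3 ≈ 54.33
for name, score in calculate_peer_comparsion_index(students):
    print(f"{name}: {score:.2f}")

# Writes student_report.csv with per-student averages and outlier status.
generate_report(students, averages, outliers)
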