diff --git a/BackEndFlask/Functions/exportCsv.py b/BackEndFlask/Functions/exportCsv.py
index b235d1ec9..78a981c12 100644
--- a/BackEndFlask/Functions/exportCsv.py
+++ b/BackEndFlask/Functions/exportCsv.py
@@ -4,10 +4,8 @@
 # and returns to a csv file to a customer.
 #
 # NOTE:
-# the current way to write out things is as follows:
-# AT_name, RN(AT_type, AT_completer), TeamName, IndividualName, CompDate, Category, datapoint
-#                  /            \                                        |
-#           unitofasess...    roleid                               rating,oc,sfi
+# The Template Method pattern was used since both exports rely on similar related data fetches
+# from the database but handle formatting differently. This should make new formats easy to add.
 #----------------------------------------------------------------------------------------------------
 import csv
 import io
@@ -15,6 +13,8 @@
 from models.queries import *
 from enum import Enum
 from datetime import datetime
+from abc import ABC, abstractmethod
+
 
 def rounded_hours_difference(completed: datetime, seen: datetime) -> int:
     """
@@ -29,19 +29,21 @@ def rounded_hours_difference(completed: datetime, seen: datetime) -> int:
 
     Return:
        Result: int (The lag_time between completed and seen)
+
+    Exception:
+        TypeError: Both arguments must be datetimes.
     """
-    if seen is None:
-        return "" # If the feedback hasnt been viewed return an empty string for proper calculation
-
+    if not isinstance(seen, datetime): raise TypeError(f"Expected: {datetime}, got {type(seen)} for seen.")
+    if not isinstance(completed, datetime): raise TypeError(f"Expected: {datetime}, got {type(completed)} for completed.")
+
     time_delta = seen - completed
     hours_remainder = divmod( divmod( time_delta.total_seconds(), 60 )[0], 60)
 
     return int(hours_remainder[0]) if hours_remainder[1] < 30.0 else int(hours_remainder[0]) + 1
-
-class Csv_data(Enum):
+class Csv_Data(Enum):
     """
     Description:
        Locations associated to where they are in the json file.
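
The half-hour rounding in rounded_hours_difference() above is easy to sanity-check. The sketch below is illustrative only and not part of the change; it imports the function the same way the routes file in this PR does:

    # Illustrative sketch, not included in the PR.
    from datetime import datetime
    from Functions.exportCsv import rounded_hours_difference

    completed = datetime(2024, 1, 1, 12, 0)
    print(rounded_hours_difference(completed, datetime(2024, 1, 1, 13, 29)))  # 89 minutes -> 1 (rounds down)
    print(rounded_hours_difference(completed, datetime(2024, 1, 1, 13, 30)))  # 90 minutes -> 2 (rounds up)
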
@@ -60,149 +62,202 @@
 
     AT_COMPLETER = 4
 
-    TEAM_NAME = 5
+    TEAM_ID = 5
 
-    FIRST_NAME = 6
+    TEAM_NAME = 6
 
-    LAST_NAME = 7
+    USER_ID = 7
 
-    COMP_DATE = 8
+    FIRST_NAME = 8
 
-    LAG_TIME = 9
+    LAST_NAME = 9
 
-    NOTIFICATION = 10
+    COMP_DATE = 10
 
-    JSON = 11
+    LAG_TIME = 11
+
+    NOTIFICATION = 12
 
+    JSON = 13
 
-def create_csv(at_id: int) -> str:
+class Csv_Creation(ABC):
     """
-    Description:
-        Creates the csv file and dumps info in to it.
-        File name follows the convention: [0-9]*.csv
+    Description: Abstract class that drives the creation of a csv formatted string.
+    This pattern was chosen because the clients want the same data in several
+    differing formats. Subclasses only supply the formatting, instead of each one
+    repeating the same query and setup code.
+    """
+
+    def __init__(self, at_id) -> None:
+        """
+        Parameters:
+            at_id:
+        """
+        self._at_id = at_id
+        self._csv_file = io.StringIO()
+        self._writer = csv.writer(self._csv_file, delimiter='\t')
+        self._completed_assessment_data = None
+        self._oc_sfi_data = None
+        self._is_teams = False
+        self._singular = None
+
+    def return_csv_str(self) -> str:
+        """
+        Description: Returns a csv formatted string.
+
+        Return:
+            str (The csv formatted string.)
+        """
+
+        # Write out common identifying data.
+        self._writer.writerow(['\ufeff']) # A BOM that helps Excel auto-detect utf-8. Downside is that it uses up a line.
+        self._writer.writerow(["Course Name"])
+        self._writer.writerow([get_course_name_by_at_id(self._at_id)])
+        self._writer.writerow([' '])
+
+
+        # List of rows: each row is another individual in the AT together with their related data.
+        self._completed_assessment_data = get_csv_data_by_at_id(self._at_id)
+
+        if len(self._completed_assessment_data) == 0:
+            return self._csv_file.getvalue()
+
+        self._singular = self._completed_assessment_data[0]
+        self._is_teams = self._singular[Csv_Data.TEAM_NAME.value] is not None
+
+        self._format()
+
+        return self._csv_file.getvalue()
+
+    def __del__(self) -> None:
+        """
+        Description: Freeing resources.
+        """
+        self._csv_file.close()
+
+    @abstractmethod
+    def _format(self) -> None:
+        pass
+
+class Ratings_Csv(Csv_Creation):
+    """
+    Description: Creates a csv string of ratings.
+    """
+    def __init__(self, at_id:int) -> None:
+        """
+        Parameters:
+            at_id:
+        """
+        super().__init__(at_id)
+
+    def _format(self) -> None:
+        """
+        Description: Formats the data in the csv string.
+        Exceptions: None except what IO can raise.
+        """
+        column_name = (["First Name"] + ["Last Name"]) if not self._is_teams else ["Team Name"]
+
+        # Adding the column names. Notice that done and comments are skipped since they are categories but are not important here.
+        column_name += [i for i in self._singular[Csv_Data.JSON.value] if (i != "done" and i !="comments")]
+
+        column_name += ["Lag Time"]
+
+        self._writer.writerow(column_name)
+
+        row_info = None
+
+        # Notice that in the list comprehensions done and comments are skipped since they are categories but don't hold relevant data.
+        for individual in self._completed_assessment_data:
+
+            row_info = ([individual[Csv_Data.FIRST_NAME.value]] + [individual[Csv_Data.LAST_NAME.value]]) if not self._is_teams else [individual[Csv_Data.TEAM_NAME.value]]
+
+            row_info += [individual[Csv_Data.JSON.value][category]["rating"] for category in individual[Csv_Data.JSON.value] if (category != "done" and category !="comments")]
+
+            lag = [" "]
+            try:
+                # Possible that a particular individual has not yet seen the feedback, so it is a NoneType in the backend.
+                lag = [rounded_hours_difference(individual[Csv_Data.COMP_DATE.value], individual[Csv_Data.LAG_TIME.value])]
+            except TypeError:
+                pass
+
+            row_info += lag
+            self._writer.writerow(row_info)
+
+class Ocs_Sfis_Csv(Csv_Creation):
+    """
+    Description: Creates a csv string of observable characteristics (OCs) and suggestions for improvement (SFIs).
+    """
+    def __init__(self, at_id: int) -> None:
+        """
+        Parameters:
+            at_id:
+        """
+        super().__init__(at_id)
+        self.__checkmark = '\u2713'
+        self.__crossmark = " "
+
+    def _format(self) -> None:
+        """
+        Description: Formats the data in the csv string.
+        Exceptions: None except what IO can raise.
+        """
+        # Writing out in category chunks.
+        for category in self._singular[Csv_Data.JSON.value]:
+            if category == "done" or category == "comments": # Yes those two are "categories", at least from how the data is pulled.
+                continue
+
+            headers = (["First Name"] + ["Last Name"]) if not self._is_teams else ["Team Name"]
+
+            oc_sfi_per_category = get_csv_categories(self._singular[Csv_Data.RUBRIC_ID.value],
+                                                     self._singular[Csv_Data.USER_ID.value],
+                                                     self._singular[Csv_Data.TEAM_ID.value],
+                                                     self._at_id, category)
+
+            # Adding the other column names, which are the OC and SFI text.
+            headers += ["OC:" + i[0] for i in oc_sfi_per_category[0]] + ["SFI:" + i[0] for i in oc_sfi_per_category[1]]
+
+            self._writer.writerow([category])
+            self._writer.writerow(headers)
+
+            # Writing the checkmarks.
+            for individual in self._completed_assessment_data:
+                respective_ocs_sfis = [individual[Csv_Data.JSON.value][category]["observable_characteristics"],
+                                       individual[Csv_Data.JSON.value][category]["suggestions"]]
+
+                row = None
+                if not self._is_teams: row = [individual[Csv_Data.FIRST_NAME.value]] + [individual[Csv_Data.LAST_NAME.value]]
+                else: row = [individual[Csv_Data.TEAM_NAME.value]]
+
+                for bits in respective_ocs_sfis:
+                    row += [self.__checkmark if i == "1" else self.__crossmark for i in bits]
+
+                self._writer.writerow(row)
+            self._writer.writerow([''])
+
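
For orientation, both _format() implementations above read the same stored JSON blob through Csv_Data.JSON. One individual's blob is assumed to look roughly like the sketch below; the key names come from the code above, while the category name and values are invented:

    # Illustrative only -- not part of the change.
    example_blob = {
        "Communication": {
            "rating": 4,
            "observable_characteristics": "0110",  # one '0'/'1' flag per OC
            "suggestions": "10",                   # one '0'/'1' flag per SFI
        },
        "done": True,       # skipped by both formatters
        "comments": "...",  # skipped by both formatters
    }
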
+class CSV_Type(Enum):
+    """
+    Description: This is the enum for the different types of csv file formats the clients have requested.
+    """
+    RATING_CSV = 0
+    OCS_SFI_CSV = 1
+
+def create_csv_strings(at_id:int, type_csv=CSV_Type.OCS_SFI_CSV.value) -> str:
+    """
+    Description: Creates a csv file with the data in the format specified by type_csv.
 
     Parameters:
-        at_id: int (The id of an assessment task)
+        at_id: (Desired assessment task)
+        type_csv: (Desired format)
+
+    Returns:
+
-    Return:
-        str
+    Exceptions: None except the chance the database or IO calls raise one.
     """
-    # Assessment_task_name, Completion_date, Rubric_name, AT_type (Team/individual), AT_completer_role (Admin, TA/Instructor, Student), Notification_date
-    with app.app_context():
-        with io.StringIO() as csvFile:
-            writer = csv.writer(csvFile, quoting=csv.QUOTE_MINIMAL)
-
-            completed_assessment_data = get_csv_data_by_at_id(at_id)
-
-            # Next line is the header line and its values.
-            writer.writerow(
-                ["Assessment_task_name"] +
-                ["Completion_date"]+
-                ["Rubric_name"]+
-                ["AT_type (Team/individual)"] +
-                ["AT_completer_role (Admin[TA/Instructor] / Student)"] +
-                ["Notification_data"]
-            )
-
-            if len(completed_assessment_data) == 0:
-                return csvFile.getvalue()
-
-            completed_assessment_data = get_csv_data_by_at_id(at_id)
-
-            if len(completed_assessment_data) == 0:
-                return csvFile.getvalue()
-
-            writer.writerow(
-                [completed_assessment_data[0][Csv_data.AT_NAME.value]] +
-                [completed_assessment_data[0][Csv_data.COMP_DATE.value]] +
-                [completed_assessment_data[0][Csv_data.RUBRIC_NAME.value]] +
-                ["Team" if completed_assessment_data[0][Csv_data.AT_TYPE.value] else "Individual"] +
-                [completed_assessment_data[0][Csv_data.AT_COMPLETER.value]] +
-                [completed_assessment_data[0][Csv_data.NOTIFICATION.value]]
-            )
-
-            for entry in completed_assessment_data:
-                sfi_oc_data = get_csv_categories(entry[Csv_data.RUBRIC_ID.value])
-
-                lag = ""
-
-                try:
-                    lag = rounded_hours_difference(entry[Csv_data.COMP_DATE.value], entry[Csv_data.LAG_TIME.value])
-                except:
-                    pass
-
-                for i in entry[Csv_data.JSON.value]:
-                    if i == "comments" or i == "done":
-                        continue
-
-                    oc = entry[Csv_data.JSON.value][i]["observable_characteristics"]
-
-                    for j in range(0, len(oc)):
-                        if(oc[j] == '0'):
-                            continue
-
-            # The block generates data lines.
-            writer.writerow(
-                ["Team_name"] +
-                ["First name"] +
-                ["last name"] +
-                ["Category"] +
-                ["Rating"] +
-                ["Observable Characteristics"] +
-                ["Suggestions for Improvement"] +
-                ["feedback time lag"]
-            )
-
-            for entry in completed_assessment_data:
-                sfi_oc_data = get_csv_categories(entry[Csv_data.RUBRIC_ID.value])
-
-                lag = ""
-
-                try:
-                    lag = rounded_hours_difference(entry[Csv_data.COMP_DATE.value], entry[Csv_data.LAG_TIME.value])
-                except:
-                    pass
-
-                for i in entry[Csv_data.JSON.value]:
-                    if i == "comments" or i == "done":
-                        continue
-
-                    oc = entry[Csv_data.JSON.value][i]["observable_characteristics"]
-
-                    for j in range (0, len(oc)):
-                        if(oc[j] == '0'):
-                            continue
-
-                        writer.writerow(
-                            [entry[Csv_data.TEAM_NAME.value]] +
-                            [entry[Csv_data.FIRST_NAME.value]] +
-                            [entry[Csv_data.LAST_NAME.value]] +
-                            [i] +
-                            [entry[Csv_data.JSON.value][i]["rating"]] +
-                            [sfi_oc_data[1][j][1]] +
-                            [""] +
-                            [lag]
-                        )
-
-                for i in entry[Csv_data.JSON.value]:
-                    if i == "comments" or i == "done":
-                        continue
-
-                    sfi = entry[Csv_data.JSON.value][i]["suggestions"]
-
-                    for j in range (0, len(sfi)):
-                        if(sfi[j] == '0'):
-                            continue
-
-                        writer.writerow(
-                            [entry[Csv_data.TEAM_NAME.value]] +
-                            [entry[Csv_data.FIRST_NAME.value]] +
-                            [entry[Csv_data.LAST_NAME.value]] +
-                            [i] +
-                            [entry[Csv_data.JSON.value][i]["rating"]] +
-                            [""] +
-                            [sfi_oc_data[0][j][1]] +
-                            [lag]
-                        )
-
-            return csvFile.getvalue()
+    match type_csv:
+        case CSV_Type.RATING_CSV.value:
+            return Ratings_Csv(at_id).return_csv_str()
+        case CSV_Type.OCS_SFI_CSV.value:
+            return Ocs_Sfis_Csv(at_id).return_csv_str()
+        case _:
+            return "No current class meets the desired csv format. Error in create_csv_strings()."
\ No newline at end of file
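
With the old create_csv() removed, create_csv_strings() above is the module's only entry point. A minimal usage sketch (illustrative, not part of the change; it assumes an active Flask application/database context and an existing assessment task id):

    # Illustrative sketch only.
    from Functions.exportCsv import create_csv_strings, CSV_Type

    at_id = 42  # hypothetical assessment task id
    ratings_csv = create_csv_strings(at_id, CSV_Type.RATING_CSV.value)    # per-category ratings + lag time
    ocs_sfis_csv = create_csv_strings(at_id, CSV_Type.OCS_SFI_CSV.value)  # checkmark grid of OCs and SFIs (the default)
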
diff --git a/BackEndFlask/controller/Routes/Csv_routes.py b/BackEndFlask/controller/Routes/Csv_routes.py
index db91a7e09..cad037ddc 100644
--- a/BackEndFlask/controller/Routes/Csv_routes.py
+++ b/BackEndFlask/controller/Routes/Csv_routes.py
@@ -1,20 +1,15 @@
 #----------------------------------------------------------------------------------------------------
 # Developer: Aldo Vera-Espinoza
-# Date: 8 May, 2024
+# Date: 14 November, 2024
 # File Purpose:
-#   Creates a way for the front end to ask for a csv file and get a properly filled
-#   csv sent back.
+#   Provides the route that returns a csv export of either (OCs and SFIs) or ratings for a given assessment task.
 #----------------------------------------------------------------------------------------------------
-
-import os
-import json
-import pandas as pd
 from flask import request
 from controller import bp
 from controller.Route_response import *
 from flask_jwt_extended import jwt_required
 from controller.security.CustomDecorators import AuthCheck, bad_token_check
-from Functions.exportCsv import create_csv
+from Functions.exportCsv import create_csv_strings
 from models.assessment_task import get_assessment_task
 from models.user import get_user
 
@@ -26,31 +21,29 @@ def get_completed_assessment_csv() -> dict:
     """
     Description:
-        Creates a csv that has the following info
-        in this order respectively.
+        Creates a csv according to the desired format.
 
     Parameter:
-        assessment_task_id: int
+        assessment_task_id: int (desired at_id)
+        format: int (desired data and format for the csv)
 
     Return:
        Response dictionary and possibly the file.
     """
     try:
         assessment_task_id = request.args.get("assessment_task_id")
+        format = request.args.get("format")
+
+        if format is None: raise ValueError("Format should be an int.")
+        format = int(format)
 
-        assessment = get_assessment_task(assessment_task_id) # Trigger an error if not exists
+        get_assessment_task(assessment_task_id) # Trigger an error if not exists
 
         user_id = request.args.get("user_id")
 
-        user = get_user(user_id) # Trigger an error if not exists
-
-        file_name = user.first_name + "_"
-
-        file_name += user.last_name + "_"
-
-        file_name += assessment.assessment_task_name.replace(" ", "_") + ".csv"
+        get_user(user_id) # Trigger an error if not exists
 
-        csv_data = create_csv(assessment_task_id)
+        csv_data = create_csv_strings(assessment_task_id, format)
 
         return create_good_response({ "csv_data": csv_data.strip() }, 200, "csv_creation")
diff --git a/BackEndFlask/models/queries.py b/BackEndFlask/models/queries.py
index e4908338a..d53092461 100644
--- a/BackEndFlask/models/queries.py
+++ b/BackEndFlask/models/queries.py
@@ -1,6 +1,8 @@
 from core import db
 from models.utility import error_log
 from models.schemas import *
+from sqlalchemy.sql import text
+from sqlalchemy import func
 
 from models.team_user import (
     create_team_user,
@@ -30,7 +32,10 @@
 from sqlalchemy import (
     and_,
     or_,
-    union
+    union,
+    select,
+    case,
+    literal_column
 )
 
 import sqlalchemy
@@ -950,21 +955,7 @@ def get_csv_data_by_at_id(at_id: int) -> list[dict[str]]:
        at_id: int (The id of an assessment task)
 
     Return:
-        list[dict][str]
-    """
-
-    """
-    Note that the current plan sqlite3 seems to execute is:
-        QUERY PLAN
-        |--SCAN CompletedAssessment
-        |--SEARCH AssessmentTask USING INTEGER PRIMARY KEY (rowid=?)
-        |--SEARCH Role USING INTEGER PRIMARY KEY (rowid=?)
-        |--SEARCH Team USING INTEGER PRIMARY KEY (rowid=?)
-        `--SEARCH User USING INTEGER PRIMARY KEY (rowid=?)
-    Untested but assume other tables are also runing a search instead of a scan
-    everywhere where there is no index to scan by.
-    The problem lies in the search the others are doing. Future speed optimications
-    can be reached by implementing composite indices.
+        list[dict][str]: (List of rows: each row is another individual in the AT together with their related data.)
     """
     pertinent_assessments = db.session.query(
         AssessmentTask.assessment_task_name,
@@ -972,13 +963,15 @@
         AssessmentTask.unit_of_assessment,
         AssessmentTask.rubric_id,
         Rubric.rubric_name,
         Role.role_name,
+        Team.team_id,
         Team.team_name,
+        CompletedAssessment.user_id,
         User.first_name,
         User.last_name,
         CompletedAssessment.last_update,
         Feedback.feedback_time,
         AssessmentTask.notification_sent,
-        CompletedAssessment.rating_observable_characteristics_suggestions_data
+        CompletedAssessment.rating_observable_characteristics_suggestions_data,
     ).join(
         Role,
         AssessmentTask.role_id == Role.role_id,
     ).outerjoin(
         Team,
         CompletedAssessment.team_id == Team.team_id
-    ).join(
+    ).outerjoin(
         User,
         CompletedAssessment.user_id == User.user_id
     ).join(
@@ -1002,18 +995,23 @@
     ).filter(
         AssessmentTask.assessment_task_id == at_id
+    ).order_by(
+        User.user_id,
     ).all()
 
     return pertinent_assessments
 
-
-def get_csv_categories(rubric_id: int) -> tuple[dict[str],dict[str]]:
+def get_csv_categories(rubric_id: int, user_id: int, team_id: int, at_id: int, category_name: str) -> tuple[dict[str],dict[str]]:
     """
     Description:
        Returns the sfi and the oc data to fill out the csv file.
 
     Parameters:
        rubric_id : int (The id of a rubric)
+        user_id : int (The id of the currently logged-in student user)
+        team_id: int (The id of a team)
+        at_id: int (The id of an assessment task)
+        category_name : str (The category that the ocs and sfis must relate to.)
 
     Return:
        tuple two Dict [Dict] [str] (All of the sfi and oc data)
     """
@@ -1023,37 +1021,68 @@
     for performance reasons later down the road. The decision depends on how
     the database evolves from now.
     """
-    sfi_data = db.session.query(
-        RubricCategory.rubric_id,
-        SuggestionsForImprovement.suggestion_text
-    ).join(
-        Category,
-        Category.category_id == RubricCategory.rubric_category_id
-    ).outerjoin(
-        SuggestionsForImprovement,
-        Category.category_id == SuggestionsForImprovement.category_id
-    ).filter(
-        RubricCategory.rubric_id == rubric_id
-    ).order_by(
-        RubricCategory.rubric_id
-    ).all()
-    oc_data = db.session.query(
-        RubricCategory.rubric_id,
-        ObservableCharacteristic.observable_characteristic_text
+    ocs_sfis_query = [None, None]
+
+    for i in range(0, 2):
+        ocs_sfis_query[i] = db.session.query(
+            ObservableCharacteristic.observable_characteristic_text if i == 0 else SuggestionsForImprovement.suggestion_text
+        ).join(
+            Category,
+            (ObservableCharacteristic.category_id if i == 0 else SuggestionsForImprovement.category_id) == Category.category_id
+        ).join(
+            RubricCategory,
+            RubricCategory.category_id == Category.category_id
+        ).join(
+            AssessmentTask,
+            AssessmentTask.rubric_id == RubricCategory.rubric_id
+        ).join(
+            CompletedAssessment,
+            CompletedAssessment.assessment_task_id == AssessmentTask.assessment_task_id
+        ).filter(
+            Category.category_name == category_name,
+            CompletedAssessment.user_id == user_id,
+            AssessmentTask.assessment_task_id == at_id,
+            RubricCategory.rubric_id == rubric_id,
+        ).order_by(
+            ObservableCharacteristic.observable_characteristics_id if i == 0 else SuggestionsForImprovement.suggestion_id
+        )
+
+        if team_id is not None: ocs_sfis_query[i] = ocs_sfis_query[i].filter(CompletedAssessment.team_id == team_id)
+
+    # Executing the queries.
+    ocs = ocs_sfis_query[0].all()
+    sfis = ocs_sfis_query[1].all()
+
+    return ocs,sfis
+
+def get_course_name_by_at_id(at_id:int) -> str :
+    """
+    Description:
+        Returns a string of the course name associated to the assessment_task_id.
+
+    Parameters:
+        at_id: int (The assessment_task_id that you want the course name of.)
+
+    Returns:
+        Course name as a string.
+
+    Exceptions:
+        None except the ones sqlalchemy + flask may raise.
+    """
+
+    course_name = db.session.query(
+        Course.course_name
     ).join(
-        Category,
-        Category.category_id == RubricCategory.rubric_category_id
-    ).outerjoin(
-        ObservableCharacteristic,
-        Category.category_id == ObservableCharacteristic.category_id
+        AssessmentTask,
+        AssessmentTask.course_id == Course.course_id
     ).filter(
-        RubricCategory.rubric_id == rubric_id
-    ).order_by(
-        RubricCategory.rubric_id
+        AssessmentTask.assessment_task_id == at_id
     ).all()
 
-    return sfi_data,oc_data
+    return course_name[0][0]
+
+
 
 
 def get_completed_assessment_ratio(course_id: int, assessment_task_id: int) -> int:
diff --git a/FrontEndReact/src/View/Admin/View/ViewAssessmentTask/ViewAssessmentTasks.js b/FrontEndReact/src/View/Admin/View/ViewAssessmentTask/ViewAssessmentTasks.js
index 5da840d90..02e3e0aad 100644
--- a/FrontEndReact/src/View/Admin/View/ViewAssessmentTask/ViewAssessmentTasks.js
+++ b/FrontEndReact/src/View/Admin/View/ViewAssessmentTask/ViewAssessmentTasks.js
@@ -26,7 +26,7 @@ class ViewAssessmentTasks extends Component {
         this.handleDownloadCsv = (atId, exportButtonId, assessmentTaskIdToAssessmentTaskName) => {
             let promise = genericResourceGET(
-                `/csv_assessment_export?assessment_task_id=${atId}`,
+                `/csv_assessment_export?assessment_task_id=${atId}&format=1`,
                 "csv_creation",
                 this,
                 {dest: "csvCreation"}
@@ -52,13 +52,13 @@ class ViewAssessmentTasks extends Component {
         if(this.state.isLoaded && this.state.csvCreation) {
             const fileData = this.state.csvCreation["csv_data"];
 
-            const blob = new Blob([fileData], { type: 'csv' });
+            const blob = new Blob([fileData], { type: 'text/csv;charset=utf-8;' });
             const url = URL.createObjectURL(blob);
             const link = document.createElement("a");
 
             link.download = this.state.downloadedAssessment + ".csv";
             link.href = url;
-            link.setAttribute('download', 'export.csv');
+            link.setAttribute('download', this.props.navbar.state.chosenCourse['course_name']+'.csv');
             link.click();
 
             var assessmentName = this.state.downloadedAssessment;