Skip to content

Commit

Permalink
Merge pull request #768 from Lunatic-Labs/SKIL-491
Browse files Browse the repository at this point in the history
SKIL-491
  • Loading branch information
aparriaran authored Nov 19, 2024
2 parents b745964 + b756511 commit debfb18
Show file tree
Hide file tree
Showing 4 changed files with 288 additions and 211 deletions.
339 changes: 197 additions & 142 deletions BackEndFlask/Functions/exportCsv.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,17 @@
# and returns a csv file to a customer.
#
# NOTE:
# the current way to write out things is as follows:
# AT_name, RN(AT_type, AT_completer), TeamName, IndividualName, CompDate, Category, datapoint
# / \ |
# unitofasess... roleid rating,oc,sfi
# The Template Method pattern was used since both exports deal with similar related data fetches
# from the database but just handle formatting differently. This is hopefully expandable.
#----------------------------------------------------------------------------------------------------
import csv
import io
from core import app
from models.queries import *
from enum import Enum
from datetime import datetime
from abc import ABC, abstractmethod


def rounded_hours_difference(completed: datetime, seen: datetime) -> int:
    """
    Description:
        Computes the number of hours between two datetimes, rounded to the
        nearest whole hour (30 or more leftover minutes rounds up).

    Parameters:
        completed: datetime (When the assessment feedback was completed)
        seen: datetime (When the feedback was first viewed; may be None)

    Return:
        Result: int (The lag_time between completed and seen), or an empty
        string when the feedback has not been viewed yet (seen is None).

    Exception:
        TypeError: Both arguments must be datetimes.
    """
    if seen is None:
        return ""  # Feedback hasn't been viewed; empty string keeps the csv cell blank.

    if not isinstance(seen, datetime):
        raise TypeError(f"Expected: {datetime}, got {type(seen)} for seen.")
    if not isinstance(completed, datetime):
        raise TypeError(f"Expected: {datetime}, got {type(completed)} for completed.")

    time_delta = seen - completed

    # Whole minutes elapsed, then split into (hours, leftover minutes).
    hours, minutes = divmod(time_delta.total_seconds() // 60, 60)

    # Round half-up: 30+ leftover minutes counts as another hour.
    return int(hours) if minutes < 30 else int(hours) + 1


class Csv_data(Enum):
class Csv_Data(Enum):
"""
Description:
Locations associated to where they are in the json file.
Expand All @@ -60,149 +62,202 @@ class Csv_data(Enum):

AT_COMPLETER = 4

TEAM_NAME = 5
TEAM_ID = 5

FIRST_NAME = 6
TEAM_NAME = 6

LAST_NAME = 7
USER_ID = 7

COMP_DATE = 8
FIRST_NAME = 8

LAG_TIME = 9
LAST_NAME = 9

NOTIFICATION = 10
COMP_DATE = 10

JSON = 11
LAG_TIME = 11

NOTIFICATION = 12

JSON = 13

def create_csv(at_id: int) -> str:
class Csv_Creation(ABC):
    """
    Description: Abstract base (Template Method) that leads to the creation of a
    csv formatted string. Clients want the same underlying data in many differing
    formatted outputs, so this class owns the shared data fetch and common header
    rows while subclasses only implement the format-specific step (_format).
    """

    def __init__(self, at_id) -> None:
        """
        Parameters:
            at_id: <class 'int'>
        """
        self._at_id = at_id
        self._csv_file = io.StringIO()
        self._writer = csv.writer(self._csv_file, delimiter='\t')
        self._completed_assessment_data = None
        self._oc_sfi_data = None
        self._is_teams = False
        # First fetched row; populated in return_csv_str() before _format() runs.
        # (Was previously a never-used `_singular_data` while `_singular` appeared
        # outside __init__.)
        self._singular = None

    def return_csv_str(self) -> str:
        """
        Description: Returns a csv formatted string.
        Return:
            <class 'str'>
        """
        # Common identifying header rows shared by every export format.
        self._writer.writerow(['\ufeff'])  # BOM so Excel auto-detects utf-8. Downside: uses up a line.
        self._writer.writerow(["Course Name"])
        self._writer.writerow([get_course_name_by_at_id(self._at_id)])
        self._writer.writerow([' '])

        # List of dicts: each entry is one individual in the AT and their related data.
        self._completed_assessment_data = get_csv_data_by_at_id(self._at_id)

        if len(self._completed_assessment_data) == 0:
            return self._csv_file.getvalue()

        self._singular = self._completed_assessment_data[0]
        self._is_teams = self._singular[Csv_Data.TEAM_NAME.value] is not None

        self._format()

        return self._csv_file.getvalue()

    def __del__(self) -> None:
        """
        Description: Freeing resources.
        """
        self._csv_file.close()

    @abstractmethod
    def _format(self) -> None:
        # Subclasses write their format-specific rows into self._writer here.
        pass

class Ratings_Csv(Csv_Creation):
    """
    Description: Concrete exporter that creates a csv string of ratings — one row
    per individual, or per team for team-type assessment tasks.
    """

    def __init__(self, at_id: int) -> None:
        """
        Parameters:
            at_id: <class 'int'>
        """
        super().__init__(at_id)

    def _format(self) -> None:
        """
        Description: Formats the data in the csv string.
        Exceptions: None except what IO can raise.
        """
        column_name = ["First Name", "Last Name"] if not self._is_teams else ["Team Name"]

        # "done" and "comments" come back as categories but carry no rating data.
        column_name += [i for i in self._singular[Csv_Data.JSON.value] if i not in ("done", "comments")]

        column_name += ["Lag Time"]

        self._writer.writerow(column_name)

        for individual in self._completed_assessment_data:
            if not self._is_teams:
                row_info = [individual[Csv_Data.FIRST_NAME.value], individual[Csv_Data.LAST_NAME.value]]
            else:
                row_info = [individual[Csv_Data.TEAM_NAME.value]]

            row_info += [
                individual[Csv_Data.JSON.value][category]["rating"]
                for category in individual[Csv_Data.JSON.value]
                if category not in ("done", "comments")
            ]

            lag = [" "]
            try:
                # Possible that a particular individual has not yet seen the
                # feedback, so the backend holds a NoneType for these fields.
                lag = [rounded_hours_difference(individual[Csv_Data.COMP_DATE.value], individual[Csv_Data.LAG_TIME.value])]
            except TypeError:
                pass

            row_info += lag
            self._writer.writerow(row_info)

class Ocs_Sfis_Csv(Csv_Creation):
    """
    Description: Concrete exporter that creates a csv string of observable
    characteristics (OCs) and suggestions for improvement (SFIs), written out
    in per-category chunks with one checkmark row per individual or team.
    """

    def __init__(self, at_id: int) -> None:
        """
        Parameters:
            at_id: <class 'int'>
        """
        super().__init__(at_id)
        self.__checkmark = '\u2713'
        self.__crossmark = " "

    def _format(self) -> None:
        """
        Description: Formats the data in the csv string.
        Exceptions: None except what IO can raise.
        """
        # Writing out in category chunks.
        for category in self._singular[Csv_Data.JSON.value]:
            # Yes, these two are "categories" — at least from how the data is pulled —
            # but they hold no OC/SFI data.
            if category in ("done", "comments"):
                continue

            headers = ["First Name", "Last Name"] if not self._is_teams else ["Team Name"]

            oc_sfi_per_category = get_csv_categories(self._singular[Csv_Data.RUBRIC_ID.value],
                                                     self._singular[Csv_Data.USER_ID.value],
                                                     self._singular[Csv_Data.TEAM_ID.value],
                                                     self._at_id, category)

            # The remaining column names are the oc and sfi text.
            headers += ["OC:" + i[0] for i in oc_sfi_per_category[0]] + ["SFI:" + i[0] for i in oc_sfi_per_category[1]]

            self._writer.writerow([category])
            self._writer.writerow(headers)

            # Writing the checkmarks: "1" bits become checkmarks, others blanks.
            for individual in self._completed_assessment_data:
                respective_ocs_sfis = [individual[Csv_Data.JSON.value][category]["observable_characteristics"],
                                       individual[Csv_Data.JSON.value][category]["suggestions"]]

                if not self._is_teams:
                    row = [individual[Csv_Data.FIRST_NAME.value], individual[Csv_Data.LAST_NAME.value]]
                else:
                    row = [individual[Csv_Data.TEAM_NAME.value]]

                for bits in respective_ocs_sfis:
                    row += [self.__checkmark if i == "1" else self.__crossmark for i in bits]

                self._writer.writerow(row)
            self._writer.writerow([''])

class CSV_Type(Enum):
    """
    Description: Enumerates the csv file formats the clients have requested;
    used to select a concrete Csv_Creation subclass.
    """

    RATING_CSV = 0
    OCS_SFI_CSV = 1

def create_csv_strings(at_id: int, type_csv=CSV_Type.OCS_SFI_CSV.value) -> str:
    """
    Description: Creates a csv-formatted string with the data in the format
    specified by type_csv.
    Parameters:
        at_id: <class 'int'> (Desired assessment task)
        type_csv: <class 'int'> (Desired format; a CSV_Type value)
    Return:
        <class 'str'>
    Exceptions: None except the chance the database or IO calls raise one.
    """
    # The database queries run inside the Flask application context.
    with app.app_context():
        match type_csv:
            case CSV_Type.RATING_CSV.value:
                return Ratings_Csv(at_id).return_csv_str()
            case CSV_Type.OCS_SFI_CSV.value:
                return Ocs_Sfis_Csv(at_id).return_csv_str()
            case _:
                return "No current class meets the desired csv format. Error in create_csv_strings()."
Loading

0 comments on commit debfb18

Please sign in to comment.