Skip to content

Commit

Permalink
Merge branch 'edge' into non_concurrent_action_dispatch
Browse files Browse the repository at this point in the history
  • Loading branch information
SyntaxColoring committed Sep 18, 2024
2 parents 45e14ca + 924a2e3 commit f15b987
Show file tree
Hide file tree
Showing 1,082 changed files with 27,443 additions and 10,981 deletions.
5 changes: 4 additions & 1 deletion .github/workflows/pd-test-build-deploy.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ jobs:
OT_PD_MIXPANEL_ID: ${{ secrets.OT_PD_MIXPANEL_ID }}
OT_PD_MIXPANEL_DEV_ID: ${{ secrets.OT_PD_MIXPANEL_DEV_ID }}
run: |
make -C protocol-designer
make -C protocol-designer NODE_ENV=development
- name: 'upload github artifact'
uses: actions/upload-artifact@v3
with:
Expand Down Expand Up @@ -215,4 +215,7 @@ jobs:
aws configure set role_arn ${{ secrets.OT_PD_DEPLOY_ROLE }} --profile deploy
aws configure set source_profile identity --profile deploy
aws s3 sync ./dist s3://sandbox.designer.opentrons.com/${{ env.OT_BRANCH }} --acl=public-read --profile=deploy
# invalidate both sandbox.opentrons.com and www.sandbox.opentrons.com cloudfront caches
aws cloudfront create-invalidation --distribution-id ${{ secrets.PD_CLOUDFRONT_SANDBOX_DISTRIBUTION_ID }} --paths "/*" --profile deploy
aws cloudfront create-invalidation --distribution-id ${{ secrets.PD_CLOUDFRONT_SANDBOX_WWW_DISTRIBUTION_ID }} --paths "/*" --profile deploy
shell: bash
6 changes: 3 additions & 3 deletions .github/workflows/react-api-client-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ env:

jobs:
js-unit-test:
name: 'react-api-client unit tests'
name: 'api-client and react-api-client unit tests'
timeout-minutes: 30
runs-on: 'ubuntu-22.04'
steps:
Expand All @@ -59,8 +59,8 @@ jobs:
npm config set cache ./.npm-cache
yarn config set cache-folder ./.yarn-cache
make setup-js
- name: 'run react-api-client unit tests'
run: make -C react-api-client test-cov
- name: 'run api-client and react-api-client unit tests'
run: make -C api-client test-cov
- name: 'Upload coverage report'
uses: codecov/codecov-action@v3
with:
Expand Down
1,081 changes: 528 additions & 553 deletions abr-testing/Pipfile.lock

Large diffs are not rendered by default.

11 changes: 6 additions & 5 deletions abr-testing/abr_testing/automation/google_sheets_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ def batch_delete_rows(self, row_indices: List[int]) -> None:
def batch_update_cells(
self,
data: List[List[Any]],
start_column: str,
start_column_index: Any,
start_row: int,
sheet_id: str,
) -> None:
Expand All @@ -132,7 +132,8 @@ def column_letter_to_index(column_letter: str) -> int:

requests = []
user_entered_value: Dict[str, Any] = {}
start_column_index = column_letter_to_index(start_column) - 1
if type(start_column_index) == str:
start_column_index = column_letter_to_index(start_column_index) - 1

for col_offset, col_values in enumerate(data):
column_index = start_column_index + col_offset
Expand Down Expand Up @@ -223,9 +224,9 @@ def get_sheet_by_name(self, title: str) -> None:
)

def token_check(self) -> None:
    """Check if credentials are still valid and refresh them if expired.

    Relies on ``self.credentials`` exposing an ``expired`` flag and a
    ``refresh()`` method (google-auth style credentials — confirm against
    how ``self.credentials`` is constructed).

    NOTE(review): the diff here interleaved the old body
    (``access_token_expired`` / ``self.gc.login()``) with the new one;
    only the new behavior is kept.
    """
    if self.credentials.expired:
        self.credentials.refresh()  # Refresh the credentials in place

def get_row_index_with_value(self, some_string: str, col_num: int) -> Any:
"""Find row index of string by looking in specific column."""
Expand Down
8 changes: 6 additions & 2 deletions abr-testing/abr_testing/automation/jira_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,12 @@ def issues_on_board(self, project_key: str) -> List[List[Any]]:
def match_issues(self, issue_ids: List[List[str]], ticket_summary: str) -> List:
"""Matches related ticket ID's."""
to_link = []
error = ticket_summary.split("_")[3]
robot = ticket_summary.split("_")[0]
try:
error = ticket_summary.split("_")[3]
robot = ticket_summary.split("_")[0]
except IndexError:
error = ""
robot = ""
# for every issue see if both match, if yes then grab issue ID and add it to a list
for issue in issue_ids:
summary = issue[1]
Expand Down
24 changes: 10 additions & 14 deletions abr-testing/abr_testing/data_collection/abr_google_drive.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,13 +65,7 @@ def create_data_dictionary(
left_pipette = file_results.get("left", "")
right_pipette = file_results.get("right", "")
extension = file_results.get("extension", "")
(
num_of_errors,
error_type,
error_code,
error_instrument,
error_level,
) = read_robot_logs.get_error_info(file_results)
error_dict = read_robot_logs.get_error_info(file_results)

all_modules = get_modules(file_results)

Expand Down Expand Up @@ -99,7 +93,7 @@ def create_data_dictionary(
pass # Handle datetime parsing errors if necessary

if run_time_min > 0:
row = {
run_row = {
"Robot": robot,
"Run_ID": run_id,
"Protocol_Name": protocol_name,
Expand All @@ -108,15 +102,13 @@ def create_data_dictionary(
"Start_Time": start_time_str,
"End_Time": complete_time_str,
"Run_Time (min)": run_time_min,
"Errors": num_of_errors,
"Error_Code": error_code,
"Error_Type": error_type,
"Error_Instrument": error_instrument,
"Error_Level": error_level,
}
instrument_row = {
"Left Mount": left_pipette,
"Right Mount": right_pipette,
"Extension": extension,
}
row = {**run_row, **error_dict, **instrument_row}
tc_dict = read_robot_logs.thermocycler_commands(file_results)
hs_dict = read_robot_logs.hs_commands(file_results)
tm_dict = read_robot_logs.temperature_module_commands(file_results)
Expand All @@ -128,7 +120,11 @@ def create_data_dictionary(
"Average Temp (oC)": "",
"Average RH(%)": "",
}
row_for_lpc = {**row, **all_modules, **notes}
row_for_lpc = {
**row,
**all_modules,
**notes,
}
row_2 = {
**row,
**all_modules,
Expand Down
23 changes: 12 additions & 11 deletions abr-testing/abr_testing/data_collection/abr_robot_error.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,15 +62,17 @@ def compare_current_trh_to_average(
df_all_run_data["Start_Time"] = pd.to_datetime(
df_all_run_data["Start_Time"], format="mixed", utc=True
).dt.tz_localize(None)
df_all_run_data["Errors"] = pd.to_numeric(df_all_run_data["Errors"])
df_all_run_data["Run Ending Error"] = pd.to_numeric(
df_all_run_data["Run Ending Error"]
)
df_all_run_data["Average Temp (oC)"] = pd.to_numeric(
df_all_run_data["Average Temp (oC)"]
)
common_filters = (
(df_all_run_data["Robot"] == robot)
& (df_all_run_data["Start_Time"] >= weeks_ago_3)
& (df_all_run_data["Start_Time"] <= start_time)
& (df_all_run_data["Errors"] < 1)
& (df_all_run_data["Run Ending Error"] < 1)
& (df_all_run_data["Average Temp (oC)"] > 1)
)

Expand Down Expand Up @@ -122,7 +124,7 @@ def compare_lpc_to_historical_data(
& (df_lpc_data["Robot"] == robot)
& (df_lpc_data["Module"] == labware_dict["Module"])
& (df_lpc_data["Adapter"] == labware_dict["Adapter"])
& (df_lpc_data["Errors"] < 1)
& (df_lpc_data["Run Ending Error"] < 1)
]
# Converts coordinates to floats and finds averages.
x_float = [float(value) for value in relevant_lpc["X"]]
Expand Down Expand Up @@ -330,18 +332,17 @@ def get_run_error_info_from_robot(
ip, results, storage_directory
)
# Error Printout
(
num_of_errors,
error_type,
error_code,
error_instrument,
error_level,
) = read_robot_logs.get_error_info(results)
error_dict = read_robot_logs.get_error_info(results)
error_level = error_dict["Error_Level"]
error_type = error_dict["Error_Type"]
error_code = error_dict["Error_Code"]
error_instrument = error_dict["Error_Instrument"]
# JIRA Ticket Fields

failure_level = "Level " + str(error_level) + " Failure"

components = [failure_level, "Flex-RABR"]
components = match_error_to_component("RABR", error_type, components)
components = match_error_to_component("RABR", str(error_type), components)
print(components)
affects_version = results["API_Version"]
parent = results.get("robot_name", "")
Expand Down
122 changes: 83 additions & 39 deletions abr-testing/abr_testing/data_collection/read_robot_logs.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,36 @@ def command_time(command: Dict[str, str]) -> float:
return start_to_complete


def count_command_in_run_data(
    commands: List[Dict[str, Any]], command_of_interest: str, find_avg_time: bool
) -> Tuple[int, float]:
    """Count occurrences of a command type in a run.

    Args:
        commands: Run-log command dicts; each must carry a ``commandType``
            key and may carry ``startedAt``/``completedAt`` ISO timestamps.
        command_of_interest: The ``commandType`` value to count.
        find_avg_time: When True, also average the elapsed seconds of the
            matching commands (entries with missing or unparsable
            timestamps contribute zero time but still count).

    Returns:
        Tuple of (occurrence count, average duration in seconds — 0.0
        when no matches or when ``find_avg_time`` is False).
    """
    timestamp_format = "%Y-%m-%dT%H:%M:%S.%f%z"
    matching = [c for c in commands if c["commandType"] == command_of_interest]
    occurrence_count = len(matching)
    elapsed_total = 0.0
    if find_avg_time:
        for entry in matching:
            begin_stamp = entry.get("startedAt", "")
            finish_stamp = entry.get("completedAt", "")
            if not (begin_stamp and finish_stamp):
                continue
            try:
                begun = datetime.strptime(begin_stamp, timestamp_format)
                finished = datetime.strptime(finish_stamp, timestamp_format)
            except ValueError:
                # Unparsable timestamps: count the command, skip its time.
                continue
            elapsed_total += (finished - begun).total_seconds()
    mean_seconds = elapsed_total / occurrence_count if occurrence_count else 0.0
    return occurrence_count, mean_seconds


def instrument_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
"""Count number of pipette and gripper commands per run."""
pipettes = file_results.get("pipettes", "")
Expand All @@ -89,6 +119,7 @@ def instrument_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
right_pipette_id = ""
left_pipette_id = ""
gripper_pickups = 0.0
avg_liquid_probe_time_sec = 0.0
# Match pipette mount to id
for pipette in pipettes:
if pipette["mount"] == "right":
Expand Down Expand Up @@ -120,6 +151,9 @@ def instrument_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
and command["params"]["strategy"] == "usingGripper"
):
gripper_pickups += 1
liquid_probes, avg_liquid_probe_time_sec = count_command_in_run_data(
commandData, "liquidProbe", True
)
pipette_dict = {
"Left Pipette Total Tip Pick Up(s)": left_tip_pick_up,
"Left Pipette Total Aspirates": left_aspirate,
Expand All @@ -128,6 +162,8 @@ def instrument_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
"Right Pipette Total Aspirates": right_aspirate,
"Right Pipette Total Dispenses": right_dispense,
"Gripper Pick Ups": gripper_pickups,
"Total Liquid Probes": liquid_probes,
"Average Liquid Probe Time (sec)": avg_liquid_probe_time_sec,
}
return pipette_dict

Expand Down Expand Up @@ -362,50 +398,58 @@ def create_abr_data_sheet(
return sheet_location


def get_error_info(file_results: Dict[str, Any]) -> Dict[str, Any]:
    """Determine if errors exist in a run log and document them.

    Args:
        file_results: Parsed run log. Reads the ``errors``, ``commands``
            and ``hasEverEnteredErrorRecovery`` keys; ``errors`` must be
            present (a ``KeyError`` is raised otherwise).

    Returns:
        Dict with the recoverable-error count and per-type breakdown, the
        run-ending error count, and the error code/type/instrument/level
        of the run-ending error (empty strings when the run had none).

    NOTE(review): the diff for this function interleaved the removed
    tuple-returning implementation with the added dict-returning one;
    only the new implementation is kept here.
    """
    # Map error code -> severity level from the error-levels CSV.
    with open(ERROR_LEVELS_PATH, "r") as error_file:
        error_levels = {row[1]: row[4] for row in csv.reader(error_file)}
    # Initialize Variables
    recoverable_errors: Dict[str, int] = dict()
    total_recoverable_errors = 0
    end_run_errors = len(file_results["errors"])
    commands_of_run: List[Dict[str, Any]] = file_results.get("commands", [])
    error_recovery = file_results.get("hasEverEnteredErrorRecovery", False)
    # Count recoverable errors (only scanned when error recovery was entered).
    if error_recovery:
        for command in commands_of_run:
            error_info = command.get("error", {})
            if error_info.get("isDefined"):
                total_recoverable_errors += 1
                error_type = error_info.get("errorType", "")
                recoverable_errors[error_type] = (
                    recoverable_errors.get(error_type, 0) + 1
                )
    # Get run-ending error info from the last command; fall back to the
    # top-level "errors" entry when the last command carries no error.
    try:
        run_command_error: Dict[str, Any] = commands_of_run[-1]["error"]
        error_type = run_command_error.get("errorType", "")
        if error_type == "PythonException":
            # Reassign error_type to be more descriptive.
            # NOTE(review): "detail" is read off the command dict, not the
            # error dict — confirm that is where the field actually lives.
            error_type = commands_of_run[-1].get("detail", "").split(":")[0]
        error_code = run_command_error.get("errorCode", "")
        # Instrument errors report a "node"; module errors report a "port".
        error_instrument = run_command_error.get("errorInfo", {}).get(
            "node", run_command_error.get("errorInfo", {}).get("port", "")
        )
    except (IndexError, KeyError):
        try:
            error_details = file_results.get("errors", [{}])[0]
        except IndexError:
            # "errors" key present but the list is empty.
            error_details = {}
        error_type = error_details.get("errorType", "")
        error_code = error_details.get("errorCode", "")
        error_instrument = error_details.get("detail", "")
    # Determine error level; unknown codes default to level "4".
    error_level = error_levels.get(error_code, "4")
    # Create dictionary with all error descriptions
    error_dict = {
        "Total Recoverable Error(s)": total_recoverable_errors,
        "Recoverable Error(s) Description": recoverable_errors,
        "Run Ending Error": end_run_errors,
        "Error_Code": error_code,
        "Error_Type": error_type,
        "Error_Instrument": error_instrument,
        "Error_Level": error_level,
    }
    return error_dict


def write_to_local_and_google_sheet(
Expand Down Expand Up @@ -570,10 +614,10 @@ def get_calibration_offsets(
def get_logs(storage_directory: str, ip: str) -> List[str]:
"""Get Robot logs."""
log_types: List[Dict[str, Any]] = [
{"log type": "api.log", "records": 1000},
{"log type": "api.log", "records": 10000},
{"log type": "server.log", "records": 10000},
{"log type": "serial.log", "records": 10000},
{"log type": "touchscreen.log", "records": 1000},
{"log type": "touchscreen.log", "records": 10000},
]
all_paths = []
for log_type in log_types:
Expand Down
Loading

0 comments on commit f15b987

Please sign in to comment.