diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 00000000..ceb8eb0a
Binary files /dev/null and b/.DS_Store differ
diff --git a/.gitignore b/.gitignore
index 3f190c95..2e0f19b5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
/.vscode
-
+.DS_Store
*.pyc
__pycache__/
@@ -9,4 +9,13 @@ migrations/
*.db
server/qview3dserver.egg-info/
-server/dist
\ No newline at end of file
+server/dist
+
+printeremu/testserver/node_modules/
+
+.DS_Store
+/qodana.yaml
+/Tests/logs/
+/Tests/server/
+/logs/
+/server/logs/
diff --git a/Tests/conftest.py b/Tests/conftest.py
new file mode 100644
index 00000000..b78f6d03
--- /dev/null
+++ b/Tests/conftest.py
@@ -0,0 +1,365 @@
+import math
+import os
+import re
+import sys
+import pytest
+from _pytest.terminal import TerminalWriter, TerminalReporter
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_configure(config):
+ for arg in config.invocation_params.args:
+ if arg.startswith("--port="):
+ config.port = arg.split("=")[1]
+ config.logger = setup_logger(config.port)
+ logger = config.logger
+ myTR = TerminalReporter(config)
+ myTR.__repr__ = lambda: "My Terminal Reporter"
+ myTW = myTR._tw
+ myTW.__repr__ = lambda: "My Terminal Writer"
+ myTW.logLine: str = ""
+ myTW.fullwidth = 113
+ myTR._screen_width = myTW.fullwidth
+ myTW.hasmarkup = True
+
+ def custom_write(self: TerminalWriter, msg: str, *, flush: bool = False, **markup: bool):
+ if msg:
+ current_line = msg.rsplit("\n", 1)[-1]
+ if "\n" in msg:
+ self._current_line = current_line
+ else:
+ self._current_line += current_line
+
+ msg = self.markup(msg, **markup)
+ log = msg.strip()
+
+ try:
+ if log:
+ if self.logLine == "":
+ self.logLine = msg
+ else:
+ self.logLine = " ".join([self.logLine, msg])
+ if flush:
+ self.logLine = self.logLine.strip()
+ if "red" in markup and markup["red"] == True:
+ logger.error(self.logLine)
+ elif "yellow" in markup and markup["yellow"] == True:
+ logger.warning(self.logLine)
+ else:
+ logger.info(self.logLine)
+ self.logLine = ""
+ self._file.write(msg)
+ except UnicodeEncodeError:
+ msg = msg.encode("unicode-escape").decode("ascii")
+ self._file.write(msg)
+
+ if flush:
+ self.flush()
+
+ myTW.write = custom_write.__get__(myTW, TerminalWriter)
+ terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
+ if terminal_reporter:
+ config.pluginmanager.unregister(terminal_reporter)
+ config.pluginmanager.register(myTR, "terminalreporter")
+ else:
+ config.pluginmanager.register(myTR, "terminalreporter")
+
+ config.terminalReporter = myTR
+
+
+@pytest.fixture(scope="session", autouse=True)
+def fabricator(request, app):
+ port = request.session.config.port
+ if not port: return None
+ from serial.tools.list_ports_common import ListPortInfo
+ from serial.tools.list_ports_linux import SysFS
+ if isinstance(port, str):
+ fabricator = app.fabricator_list.getFabricatorByPort(port)
+ elif isinstance(port, ListPortInfo) or isinstance(port, SysFS):
+ fabricator = app.fabricator_list.getFabricatorByPort(port.device)
+ else:
+ fabricator = None
+ if fabricator is None:
+ pytest.skip("No port specified")
+ if os.getenv("LEVEL") == "DEBUG":
+ fabricator.device.logger.setLevel(fabricator.device.logger.DEBUG)
+ fabricator.device.connect()
+ yield fabricator
+ fabricator.device.disconnect()
+
+# @pytest.fixture(scope="session", autouse=True)
+# def client(request, app):
+# client = Client(logger=True)
+# port = request.session.config.port
+# portNum = int(port.split("COM")[-1])
+# clientPort = portNum + 5000
+# client.connect(f"http://localhost:{clientPort}")
+# app.client = client
+# yield client
+# client.disconnect()
+
+@pytest.fixture(scope="session", autouse=True)
+def app():
+ from globals import current_app as app
+ with app.app_context():
+ yield app
+ app.fabricator_list.teardown()
+
+
+from Classes.Logger import Logger
+
+intLogger = Logger("Internal Errors", consoleLogger=sys.stdout, fileLogger="internal_errors.log", loggingLevel=Logger.INFO)
+
+# def pytest_internalerror(excrepr, excinfo):
+# # This hook is called when pytest encounters an internal error
+# intLogger.error(f"Internal pytest error:\n{excrepr}")
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--myVerbose",
+ action="store",
+ default=1,
+ help="my verbose level"
+ )
+ parser.addoption(
+ "--port",
+ action="store",
+ default=None,
+ help="port to test"
+ )
+
+
+def line_separator(interrupter: str, symbol: str = "-", length: int = 136, color: int | None = None, colorAll: bool = False) -> str:
+ if not interrupter:
+ if color:
+ return f"\033[{color}m" + symbol * (length//len(symbol)) + "\033[0m"
+ return symbol * (length//len(symbol))
+ interrupterNoColor = re.sub(r'\033\[[0-9;]*m', '', interrupter)
+ side = (length - 2 - len(interrupterNoColor)) / 2
+ if color:
+ color = f"\033[{color}m"
+ if colorAll:
+ return color + symbol * math.ceil(side) + " " + interrupter + " " + symbol * math.floor(side) + "\033[0m"
+ else:
+ return color + symbol * math.ceil(side) + "\033[0m" + " " + interrupter + " " + color + symbol * math.floor(side) + "\033[0m"
+ return symbol * math.ceil(side) + " " + interrupter + " " + symbol * math.floor(side)
+
+def setup_logger(port):
+    # set up file location for output logs
+ from globals import root_path
+ log_folder = os.path.join(root_path,"Tests", "logs")
+ os.makedirs(log_folder, exist_ok=True)
+ from datetime import datetime
+ timestamp = datetime.now().strftime("%m-%d-%Y__%H-%M-%S")
+ subfolder = os.path.join(log_folder, timestamp)
+ os.makedirs(subfolder, exist_ok=True)
+ log_file_path = os.path.join(subfolder, f"test_{port}.log")
+ return Logger(port, "Test Printer", consoleLogger=None, fileLogger=log_file_path, showFile=False, showLevel=False)
+
+# @pytest.hookimpl(tryfirst=True)
+# def pytest_sessionstart(session) -> None:
+# for arg in session.config.invocation_params.args:
+# if not hasattr(session.config, "verbosity") and arg.startswith("--myVerbose="):
+# session.config.verbosity = int(arg.split("=")[1])
+# elif not hasattr(session.config, "port") and arg.startswith("--port="):
+# session.config.port = arg.split("=")[1]
+# elif not hasattr(session.config, "testLevel") and arg.startswith("--testLevel="):
+# session.config.testLevel = int(arg.split("=")[1])
+# if session.config.verbosity > 2:
+# session.config.verbosity = 2
+#
+# session.config.start_time = time.time()
+# session.config.passed_count = 0
+# session.config.failed_count = 0
+# session.config.skipped_count = 0
+# session.config.xfailed_count = 0
+# session.config.xpassed_count = 0
+# session.config.failNames = []
+# session.config.fails = {}
+# session.config.logger = setup_logger(session.config.port)
+#
+#
+# if session.config.verbosity >= 0:
+# logger = session.config.logger
+# logger.logMessageOnly("\033[1m" + line_separator("test session starts", symbol="=") + "\033[0m")
+# verinfo = platform.python_version()
+# msg = f"platform {sys.platform} -- Python {verinfo}"
+# pypy_version_info = getattr(sys, "pypy_version_info", None)
+# if pypy_version_info:
+# verinfo = ".".join(map(str, pypy_version_info[:3]))
+# msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
+# msg += f", pytest-{pytest.__version__}, pluggy-{pluggy.__version__}"
+# logger.logMessageOnly(msg)
+# logger.logMessageOnly(f"rootdir: {session.config.rootdir}")
+
+def pytest_collection_modifyitems(session, config, items):
+ # session.config.logger.logMessageOnly(f"\033[1m...collected {len(items)} items...")
+ file_order = [
+ "test_app.py",
+ "test_fabricator_list.py",
+ "test_device.py",
+ "test_fabricator.py",
+ ]
+
+ def get_file_order(item):
+ file_name = item.location[0]
+ return file_order.index(file_name) if file_name in file_order else len(file_order)
+
+ # Sort the items based on the file order
+ items.sort(key=get_file_order)
+
+# def pytest_sessionfinish(session, exitstatus) -> None:
+# session_duration = time.time() - session.config.start_time
+# passes = session.config.passed_count
+# fails = session.config.failed_count
+# skips = session.config.skipped_count
+# xfails = session.config.xfailed_count
+# xpasses = session.config.xpassed_count
+# logger = session.config.logger
+#
+# if hasattr(session.config, "_capturemanager"):
+# capture_manager = session.config._capturemanager
+# # Suspend capturing to retrieve the output
+# captured = capture_manager.read_global_and_disable()
+#
+# # Print the captured stdout and stderr
+# logger.logMessageOnly("\nCaptured output during tests:\n")
+# logger.logMessageOnly(captured)
+#
+# # Re-enable capture if needed for further use
+# capture_manager.resume_global_capture()
+#
+# stats = []
+# if passes > 0: stats.append(f"\033[32m\033[1m{passes} passed")
+# if fails > 0: stats.append(f"\033[31m\033[1m{fails} failed")
+# if skips > 0: stats.append(f"\033[33m{skips} skipped")
+# if xfails > 0: stats.append(f"\033[33m{xfails} xfailed")
+# if xpasses > 0: stats.append(f"\033[33m{xpasses} xpassed")
+#
+# if len(stats) > 0: summary = ", ".join(stats)
+# else: summary = "\033[33mno tests ran"
+#
+# summary += f"\033[32m in {session_duration:.2f}s"
+# if session_duration > 3600:
+# summary += f" ({session_duration // 3600:02.0f}:{session_duration % 3600 // 60:02.0f}:{(session_duration % 60)//1:02.0f}.{(session_duration % 1).__round__(2) * 100 // 1:02.0f})"
+# elif session_duration > 60:
+# summary += f" ({session_duration // 60:02.0f}:{(session_duration % 60)//1:02.0f}.{(session_duration % 1).__round__(2) * 100 // 1:02.0f})"
+#
+# if session.config.failed_count > 0:
+# headerText = "\n" + line_separator("FAILURES", symbol="=")
+# logger.logMessageOnly(headerText, logLevel=logger.ERROR)
+# for failTest in session.config.failNames:
+# logger.logMessageOnly(line_separator(failTest, symbol="_"), end="\n", logLevel=logger.ERROR)
+# if not hasattr(session.config.fails[failTest], "reprtraceback"):
+# if not hasattr(session.config.fails[failTest], "longrepr"):
+# if hasattr(session.config.fails[failTest], "errorstring"):
+# logger.error(session.config.fails[failTest].errorstring)
+# else:
+# logger.error(session.config.fails[failTest])
+# else:
+# logger.error(session.config.fails[failTest].longrepr)
+# elif not hasattr(session.config.fails[failTest].reprtraceback, "reprentries"):
+# logger.error(session.config.fails[failTest].reprtraceback)
+# else:
+# logger.logException(session.config.fails[failTest].reprtraceback.reprentries)
+# logger.logMessageOnly("\n\033[32m" + line_separator(summary, symbol="="))
+#
+visited_modules = set()
+
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_protocol(item, nextitem):
+ module_name = item.module.__name__
+ if module_name not in visited_modules:
+ visited_modules.add(module_name)
+ item.config.terminalReporter.write("\n" + line_separator(item.module.__desc__(), symbol="-", length=item.config.terminalReporter._tw.fullwidth - 1), flush=True)
+ yield
+#
+# @pytest.hookimpl(hookwrapper=True)
+# def pytest_runtest_logreport(report):
+# verbosity = report.verbosity
+# yield
+# logger = report.logger
+# port = report.port
+# if (report.when == "setup" and not report.passed) or (report.when == "call"):
+# if port is None:
+# # Retrieve port from the test function if it's set as an attribute
+# port = os.getenv("PORT")
+#
+# if verbosity == 0:
+# if report.passed:
+# logger.info("\033[32m.\033[0m")
+# elif report.failed:
+# logger.info("\033[31mF\033[0m")
+# elif report.skipped:
+# logger.info("\033[33ms\033[0m")
+# elif hasattr(report, "xfailed") and report.xfailed:
+# logger.info("\033[33mX\033[0m")
+# elif hasattr(report, "xpassed") and report.xpassed:
+# logger.info("\033[31mx\033[0m")
+# else:
+# logger.info(f"IDK what happened!?!?: {report}")
+# elif verbosity == 1:
+# loc = report.nodeid.split("::")[-1]
+# testString = f"{loc}[{port}]{' ' * (59 - len(loc) - len(str(port)) - 2)}"
+# if report.passed:
+# logger.info(f"{testString} \033[32mPASSED\033[0m")
+# elif report.failed:
+# logger.info(f"{testString} \033[31mFAILED\033[0m")
+# elif report.skipped:
+# logger.info(f"{testString} \033[33mSKIPPED\033[0m")
+# elif hasattr(report, "xfailed") and report.xfailed:
+# logger.info(f"{testString} \033[33mXFAILED\033[0m")
+# elif hasattr(report, "xpassed") and report.xpassed:
+# logger.info(f"{testString} \033[31mXPASSED\033[0m")
+# else:
+# logger.info(f"{testString} IDK what happened!?!?: {report}")
+# elif verbosity >= 2:
+# loc = report.nodeid
+# testString = f"{loc}[{port}]{' ' * (79 - len(loc) - len(str(port)) - 2)}"
+# if report.passed:
+# logger.info(f"{testString} \033[32mPASSED\033[0m")
+# elif report.failed:
+# logger.info(f"{testString} \033[31mFAILED\033[0m:\n\n")
+# if not hasattr(report, "longrepr"):
+# if hasattr(report, "errorstring"):
+# logger.error(report.errorstring)
+# else:
+# logger.error(report)
+# elif not hasattr(report.longrepr, "reprtraceback"):
+# logger.error(report.longrepr)
+# elif not hasattr(report.longrepr.reprtraceback, "reprentries"):
+# logger.error(report.longrepr.reprtraceback)
+# else:
+# logger.logException(report.longrepr.reprtraceback.reprentries)
+# logger.logException(report.longrepr.reprtraceback.reprentries)
+# elif report.skipped:
+# logger.info(f"{testString} \033[33mSKIPPED\033[0m: {report.longrepr[-1].split('Skipped: ')[-1]}")
+# else:
+# logger.info(f"{testString} IDK what happened!?!?: {report}")
+#
+# def pytest_collectreport(report):
+# if report.failed:
+# intLogger.logMessageOnly(f"Collection failed:", logLevel=intLogger.ERROR)
+# if not hasattr(report.longrepr, "reprtraceback"):
+# intLogger.logException(report.longrepr.longrepr)
+# return
+# if not hasattr(report.longrepr.reprtraceback, "reprentries"):
+# intLogger.logException(report.longrepr.reprtraceback)
+# return
+# else: intLogger.logException(report.longrepr.reprtraceback.reprentries)
+
+def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus, config):
+ import time
+ from _pytest.terminal import format_session_duration
+ session_duration = time.time() - terminalreporter._sessionstarttime
+ (parts, main_color) = terminalreporter.build_summary_stats_line()
+ line_parts = []
+ for text, markup in parts:
+ with_markup = terminalreporter._tw.markup(text, **markup)
+ line_parts.append(with_markup)
+ msg = ", ".join(line_parts)
+ main_markup = {main_color: True}
+ duration = f" in {format_session_duration(session_duration)}"
+ duration_with_markup = terminalreporter._tw.markup(duration, **main_markup)
+ msg += duration_with_markup
+ config.logger.logMessageOnly("\n" + line_separator(msg, symbol="=", length=terminalreporter._tw.fullwidth, color=terminalreporter._tw._esctable[main_color]))
diff --git a/Tests/parallel_test_runner.py b/Tests/parallel_test_runner.py
new file mode 100644
index 00000000..9c1eb35e
--- /dev/null
+++ b/Tests/parallel_test_runner.py
@@ -0,0 +1,70 @@
+import os
+import sys
+import re
+import subprocess
+import platform
+
+# Add test root to sys.path if needed
+from globals import root_path
+if root_path not in sys.path:
+ sys.path.append(root_path)
+serverpath = os.path.join(root_path, "server")
+if serverpath not in sys.path:
+ sys.path.append(serverpath)
+testpath = os.path.join(root_path, "Tests")
+if testpath not in sys.path:
+ sys.path.append(testpath)
+
+from server.Classes.Ports import Ports
+
+PORTS = []
+# List of available ports for testing
+if platform.system() == "Windows":
+ import winreg
+ path = "HARDWARE\\DEVICEMAP\\SERIALCOMM"
+ try:
+ key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)
+ for i in range(256):
+ try:
+ val = winreg.EnumValue(key, i)
+ if re.match(r"COM\d+|LPT\d+", val[1]):
+ PORTS.append(val[1])
+ except OSError:
+ break
+ except FileNotFoundError:
+ pass
+elif platform.system() == "Darwin":
+ import glob
+ PORTS = glob.glob("/dev/tty.*")
+else:
+ import glob
+ PORTS = glob.glob("/dev/tty[A-Za-z]*")
+
+
+
+# Configuration flags for the per-port pytest runs below
+testLevel = 10
+verbosity = 2
+runFlags = 0b010 # 0b001: -s, 0b010: -vvv or -p no:terminal, 0b100: debug or info
+
+def run_tests_for_port(comm_port):
+ env = os.environ.copy()
+ env["PORT"] = comm_port
+ args = ["pytest", testpath, f"--myVerbose={verbosity}", f"--port={comm_port}"]
+ if runFlags & 0b1: args.append("-s")
+ args.append("-vv") if runFlags & 0b10 else args.append("-p no:terminal")
+ env["LEVEL"] = "DEBUG" if runFlags & 0b100 else "INFO"
+ subprocess.Popen(args, env=env).wait()
+
+if __name__ == "__main__":
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ if len(PORTS) != 0:
+ with ThreadPoolExecutor(max_workers=len(PORTS)) as executor:
+ futures = [executor.submit(run_tests_for_port, port) for port in PORTS if
+ Ports.getPortByName(port) is not None]
+ for future in as_completed(futures):
+ try:
+ future.result()
+ except Exception as e:
+ from globals import current_app
+ current_app.handle_errors_and_logging(e)
\ No newline at end of file
diff --git a/Tests/test_app.py b/Tests/test_app.py
new file mode 100644
index 00000000..32835748
--- /dev/null
+++ b/Tests/test_app.py
@@ -0,0 +1,146 @@
+import os
+import re
+import pytest
+from Classes.Logger import Logger
+from parallel_test_runner import testLevel
+
+def __desc__(): return "App Tests"
+
+@pytest.mark.dependency()
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_db_to_make_sure_it_has_valid_file_path(app):
+ db_file_no_path = app.config["SQLALCHEMY_DATABASE_URI"].split("/")[-1].split("\\")[-1]
+ assert db_file_no_path, "database_uri doesn't exist?"
+ assert db_file_no_path == "hvamc.db", f"database_uri is {db_file_no_path}"
+ assert os.path.exists(app.config["SQLALCHEMY_DATABASE_URI"].split("sqlite:///")[
+ -1]), f"Database file {app.config["SQLALCHEMY_DATABASE_URI"].split("sqlite:///")[-1]} does not exist"
+
+
+@pytest.mark.dependency(depends=["test_db_to_make_sure_it_has_valid_file_path"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_base_url_for_http_responses_has_valid_format(app):
+ assert app.config["base_url"], "base_url doesnt exist?"
+ assert re.match(r"http://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}$", app.config["base_url"]) or re.match(
+ r"http://localhost:\d{1,5}$", app.config["base_url"]), f"base_url is {app.config['base_url']}"
+
+
+@pytest.mark.dependency(depends=["test_base_url_for_http_responses_has_valid_format"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_environment_for_development(app):
+ assert app.config["environment"], "environment doesnt exist?"
+ assert app.config["environment"] == "development", f"environment is {app.config['environment']}"
+
+
+@pytest.mark.dependency(depends=["test_environment_for_development"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_logger_is_custom_implementation_and_exists(app):
+ assert app.logger, "myLogger doesnt exist?"
+ assert app.logger.name, "name doesnt exist?"
+ assert str(app.logger.name) == "Logger_App", f"myLogger is {str(app.logger.name)}"
+ assert isinstance(app.logger, Logger), "myLogger is not an instance of Logger?"
+ assert app.logger.fileLogger, "fileLogger doesnt exist?"
+
+
+@pytest.mark.dependency(depends=["test_logger_is_custom_implementation_and_exists"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_socketio_exists_and_works(app):
+ assert app.socketio, "socketio doesnt exist?"
+ assert app.socketio.async_mode, "async_mode doesnt exist?"
+ assert app.socketio.async_mode == "threading", f"async_mode is {app.socketio.async_mode}"
+ socketio_test_client = app.socketio.test_client(app)
+ assert socketio_test_client.is_connected(), "socketio_test_client is not connected?"
+ socketio_test_client.emit('my_event', {'data': 'test'})
+ received = socketio_test_client.get_received()
+ assert len(received) == 0, "Response received from socketio"
+
+
+@pytest.mark.dependency(depends=["test_socketio_exists_and_works"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_handle_errors_and_logging(app):
+ assert app.handle_errors_and_logging, "handle_errors_and_logging doesnt exist?"
+ assert callable(app.handle_errors_and_logging), "handle_errors_and_logging is not callable?"
+ assert app.handle_errors_and_logging(Exception("Test Exception")) is False, "handle_errors_and_logging did not return False?"
+
+
+@pytest.mark.dependency(depends=["test_handle_errors_and_logging"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_static_loading_for_client(app):
+ assert app.static_folder, "static_folder doesnt exist?"
+ assert os.path.join("client","dist") in app.static_folder, f"static_folder is {app.static_folder}"
+ assert os.path.exists(app.static_folder), f"static_folder {app.static_folder} does not exist"
+
+
+@pytest.mark.dependency(depends=["test_static_loading_for_client"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_index_html_exists_in_the_static_files(app):
+ assert os.path.exists(os.path.join(app.static_folder, "index.html")), f"index.html does not exist in {app.static_folder}"
+
+
+@pytest.mark.dependency(depends=["test_index_html_exists_in_the_static_files"])
+@pytest.mark.skipif(condition=testLevel < 1, reason="Not doing lvl 1 tests")
+def test_main_view_response_is_200(app):
+ with app.test_client() as client:
+ response = client.get('/')
+ assert response.status_code == 200, f"Response status code is {response.status_code}"
+ assert response.data, "Response data is empty?"
+ assert b'' in response.data, "Response data does not contain ?"
+ assert b'?"
+ assert b'
' in response.data, "Response data does not contain ?"
+ assert b'' in response.data, "Response data does not contain ?"
+ assert b'' in response.data, "Response data does not contain ?"
+ assert b'
' in response.data, 'Response data does not contain
?'
+ assert b'
diff --git a/client/src/components/ConsoleTerminal.vue b/client/src/components/ConsoleTerminal.vue
new file mode 100644
index 00000000..ba02d0fb
--- /dev/null
+++ b/client/src/components/ConsoleTerminal.vue
@@ -0,0 +1,79 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/client/src/components/GCode3DImageViewer.vue b/client/src/components/GCode3DImageViewer.vue
index 8a072e4d..b5af0c0d 100644
--- a/client/src/components/GCode3DImageViewer.vue
+++ b/client/src/components/GCode3DImageViewer.vue
@@ -1,109 +1,154 @@
-
+
\ No newline at end of file
+
diff --git a/client/src/components/GCode3DLiveViewer.vue b/client/src/components/GCode3DLiveViewer.vue
index 9bc7c173..11970026 100644
--- a/client/src/components/GCode3DLiveViewer.vue
+++ b/client/src/components/GCode3DLiveViewer.vue
@@ -1,61 +1,70 @@
@@ -131,7 +110,7 @@ const fileToString = (file: File | undefined) => {
\ No newline at end of file
diff --git a/client/src/components/GCodeThumbnail.vue b/client/src/components/GCodeThumbnail.vue
index 1e625565..9860241f 100644
--- a/client/src/components/GCodeThumbnail.vue
+++ b/client/src/components/GCodeThumbnail.vue
@@ -1,5 +1,5 @@
-
+
This file doesn't have a thumbnail attached, you can check the viewer instead!
-import { printers, useGetPorts, useRetrievePrintersInfo, useHardReset, useDeletePrinter, useNullifyJobs, useEditName, useRemoveThread, useEditThread, useDiagnosePrinter, useRepair, type Device, useRetrievePrinters, useMoveHead } from '../model/ports'
-import { isLoading } from '../model/jobs'
-import { useRouter } from 'vue-router'
+import { printers, useRetrievePrintersInfo, useHardReset, useDeletePrinter, useNullifyJobs, useEditName, useRemoveThread, useEditThread, useDiagnosePrinter, useRepair, type Device, useRetrievePrinters, useMoveHead } from '@/model/ports'
+import { isLoading } from '@/model/jobs'
import { ref, onMounted } from 'vue';
-import { toast } from '../model/toast'
-import RegisterModal from '../components/RegisterModal.vue'
+import { toast } from '@/model/toast'
+import RegisterModal from '@/components/RegisterModal.vue'
import router from '@/router';
const { retrieve } = useRetrievePrinters();
@@ -35,8 +34,7 @@ const selectedPrinter = ref(null);
// fetch list of connected ports from backend and automatically load them into the form dropdown
onMounted(async () => {
isLoading.value = true
- const allPrinters = await retrieve(); // load all registered printers
- registered.value = allPrinters
+ registered.value = await retrieve(); // load all registered printers
isLoading.value = false
});
@@ -113,9 +111,9 @@ const doRepair = async () => {
isLoading.value = false
}
-const doMove = async (printer: Device) => {
+const doMove = async (port: string) => {
isLoading.value = true
- await move(printer.device).then(() => {
+ await move(port).then(() => {
toast.success('Printer moved to home position')
}).catch(() => {
toast.error('Failed to move printer to home position')
@@ -125,9 +123,9 @@ const doMove = async (printer: Device) => {
const doDiagnose = async (printer: Device) => {
isLoading.value = true
- message.value = `Diagnosing ${printer.name}:
This printer is registered under port ${printer.device}.`
+ message.value = `Diagnosing ${printer.name}:
This printer is registered under port ${printer.device['serialPort']}.`
showMessage.value = true
- let str = await diagnose(printer.device)
+ let str = await diagnose(printer.device['serialPort'])
let resstr = str.diagnoseString
message.value += "