diff --git a/cli/medperf/_version.py b/cli/medperf/_version.py index 3dc1f76bc..b3f475621 100644 --- a/cli/medperf/_version.py +++ b/cli/medperf/_version.py @@ -1 +1 @@ -__version__ = "0.1.0" +__version__ = "0.1.2" diff --git a/docs/mlcubes/mlcube_models.md b/docs/mlcubes/mlcube_models.md index a72a76ba5..6b8222d93 100644 --- a/docs/mlcubes/mlcube_models.md +++ b/docs/mlcubes/mlcube_models.md @@ -55,7 +55,7 @@ docker_image_name [docker/image:latest]: # (6)! 2. Determines how the MLCube root folder will be named. 3. Gives a Human-readable description to the MLCube Project. 4. Documents the MLCube implementation by specifying the author. -5. Indicates how many GPUs should be visible by the MLCube. +5. Set it to 0. This is now ignored and will be removed in the next release. Please check the last section to learn how to use MLCube with GPUs. 6. MLCubes use Docker containers under the hood. Here, you can provide an image tag to the image that will be created by this MLCube. **You should use a valid name that allows you to upload it to a Docker registry.** After filling the configuration options, the following directory structure will be generated: @@ -232,9 +232,6 @@ accelerator_count [0]: 0 docker_image_name [docker/image:latest]: repository/model-tutorial:0.0.0 ``` -!!! note - This example is built to be used with a CPU. See the [last section](#using-the-example-with-gpus) to know how to configure this example with a GPU. - Note that `docker_image_name` is arbitrarily chosen. Use a valid docker image. ### Move your Codebase @@ -355,6 +352,11 @@ The provided example codebase runs only on CPU. You can modify it to have `pytor The general instructions for building an MLCube to work with a GPU are the same as the provided instructions, but with the following slight modifications: -- Use a number different than `0` for the `accelerator_count` that you will be prompted with when creating the MLCube template. 
-- Inside the `docker` section of the `mlcube.yaml`, add a key value pair: `gpu_args: --gpus=all`. These `gpu_args` will be passed to `docker run` under the hood by MLCube. You may add more than just `--gpus=all`. - Make sure you install the required GPU dependencies in the docker image. For instance, this may be done by simply modifying the `pip` dependencies in the `requirements.txt` file to download `pytorch` with cuda, or by changing the base image of the dockerfile. + +For testing your MLCube with GPUs using the MLCube tool as in the previous section, make sure you run the `mlcube run` command with a `--gpus` argument. Example: `mlcube run --gpus=all ...` + +For testing your MLCube with GPUs using MedPerf, make sure you pass as well the `--gpus` argument to the MedPerf command. Example: `medperf --gpus=all ...`. + +!!! tip + Run `medperf --help` to see the possible options you can use for the `--gpus` argument. diff --git a/scripts/get_dataset_hashes.py b/scripts/get_dataset_hashes.py new file mode 100644 index 000000000..82cf54484 --- /dev/null +++ b/scripts/get_dataset_hashes.py @@ -0,0 +1,50 @@ +import hashlib +import os +import yaml + +from medperf import config +from medperf.init import initialize +from typer import Option, run + + +def sha256sum(filename): + h = hashlib.sha256() + b = bytearray(128 * 1024) + mv = memoryview(b) + with open(filename, "rb", buffering=0) as f: + while n := f.readinto(mv): + h.update(mv[:n]) + return h.hexdigest() + + +def generate_hash_dict(path): + hash_dict = {} + contents = os.listdir(path) + + for item in contents: + item_path = os.path.join(path, item) + if os.path.isdir(item_path): + hash_dict[item] = generate_hash_dict(item_path) + else: + hash_dict[item] = sha256sum(item_path) + + return hash_dict + + +def main( + dataset_uid: str = Option(..., "-d", "--dataset"), + output_file: str = Option("dataset_hashes.yaml", "-f", "--file"), +): + initialize() + dset_path = os.path.join(config.datasets_folder, dataset_uid) + + # Get
hashes of tree + hash_dict = generate_hash_dict(dset_path) + + # Write results to a file + with open(output_file, "w") as f: + yaml.dump(hash_dict, f) + + +if __name__ == "__main__": + run(main) diff --git a/scripts/get_reviewed_cases_hashes.py b/scripts/get_reviewed_cases_hashes.py new file mode 100644 index 000000000..02e690b6e --- /dev/null +++ b/scripts/get_reviewed_cases_hashes.py @@ -0,0 +1,56 @@ +import tarfile +import hashlib +import shutil +import os +import yaml + + +def sha256sum(filename): + h = hashlib.sha256() + b = bytearray(128 * 1024) + mv = memoryview(b) + with open(filename, "rb", buffering=0) as f: + while n := f.readinto(mv): + h.update(mv[:n]) + return h.hexdigest() + + +def generate_hash_dict(path): + hash_dict = {} + contents = os.listdir(path) + + for item in contents: + item_path = os.path.join(path, item) + if os.path.isdir(item_path): + hash_dict[item] = generate_hash_dict(item_path) + else: + hash_dict[item] = sha256sum(item_path) + + return hash_dict + + +def main(): + dst = ".reviewed_cases_contents" + hashes_file = "reviewed_cases_hashes.yaml" + + # Create destination folder + shutil.rmtree(dst, ignore_errors=True) + os.makedirs(dst, exist_ok=True) + + # Extract contents + with tarfile.open("reviewed_cases.tar.gz") as file: + file.extractall(dst) + + # Get hashes of tree + hash_dict = generate_hash_dict(dst) + + # Write results to a file + with open(hashes_file, "w") as f: + yaml.dump(hash_dict, f) + + # Delete generated files and folders + shutil.rmtree(dst, ignore_errors=True) + + +if __name__ == "__main__": + main() diff --git a/scripts/monitor/rano_monitor/__main__.py b/scripts/monitor/rano_monitor/__main__.py index 926ac2fcf..1e50d4e4f 100644 --- a/scripts/monitor/rano_monitor/__main__.py +++ b/scripts/monitor/rano_monitor/__main__.py @@ -7,13 +7,12 @@ DEFAULT_STAGES_PATH, STAGES_HELP, DSET_LOC_HELP, - OUT_HELP + OUT_HELP, ) from rano_monitor.dataset_browser import DatasetBrowser from rano_monitor.handlers import InvalidHandler 
from rano_monitor.handlers import PromptHandler from rano_monitor.handlers import ReportHandler, ReportState -from rano_monitor.handlers import ReviewedHandler from rano_monitor.handlers import TarballReviewedHandler from rano_monitor.tarball_browser import TarballBrowser from typer import Option @@ -40,13 +39,11 @@ def run_dset_app(dset_path, stages_path, output_path): report_state = ReportState(report_path, t_app) report_watchdog = ReportHandler(report_state) prompt_watchdog = PromptHandler(dset_data_path, t_app) - reviewed_watchdog = ReviewedHandler(dset_data_path, t_app) invalid_watchdog = InvalidHandler(invalid_path, t_app) t_app.set_vars( dset_data_path, stages_path, - reviewed_watchdog, output_path, invalid_path, invalid_watchdog, @@ -56,7 +53,6 @@ def run_dset_app(dset_path, stages_path, output_path): observer = Observer() observer.schedule(report_watchdog, dset_path) observer.schedule(prompt_watchdog, os.path.join(dset_path, "data")) - observer.schedule(reviewed_watchdog, ".") observer.schedule(invalid_watchdog, os.path.dirname(invalid_path)) observer.start() t_app.run() @@ -89,13 +85,8 @@ def run_tarball_app(tarball_path): @app.command() def main( - dataset_uid: str = Option(None, "-d", "--dataset", help=DSET_HELP), - stages_path: str = Option( - DEFAULT_STAGES_PATH, - "-s", - "--stages", - help=STAGES_HELP - ), + dataset_uid: str = Option(..., "-d", "--dataset", help=DSET_HELP), + stages_path: str = Option(DEFAULT_STAGES_PATH, "-s", "--stages", help=STAGES_HELP), dset_path: str = Option( None, "-p", diff --git a/scripts/monitor/rano_monitor/assets/subject-browser.tcss b/scripts/monitor/rano_monitor/assets/subject-browser.tcss index 0e2bfd618..f0f950606 100644 --- a/scripts/monitor/rano_monitor/assets/subject-browser.tcss +++ b/scripts/monitor/rano_monitor/assets/subject-browser.tcss @@ -71,11 +71,6 @@ SubjectDetails Button { column-span: 2; } -SubjectDetails { - overflow-y: scroll; - height: 100%; -} - SubjectDetails CopyableItem { layout: grid; 
grid-size: 12 1; @@ -121,7 +116,6 @@ Summary { padding: 3; align: center middle; content-align: center middle; - overflow-y: scroll; } Summary Static { @@ -148,4 +142,9 @@ ListItem .subtitle { MarkdownViewer { height: auto; min-height: 2; +} + +.review-btn { + width: 100%; + margin: 1; } \ No newline at end of file diff --git a/scripts/monitor/rano_monitor/constants.py b/scripts/monitor/rano_monitor/constants.py index 8e115568c..7330ce36c 100644 --- a/scripts/monitor/rano_monitor/constants.py +++ b/scripts/monitor/rano_monitor/constants.py @@ -29,4 +29,5 @@ LISTITEM_MAX_LEN = 30 REVIEWED_PATTERN = r".*\/(.*)\/(.*)\/finalized\/(.*\.nii\.gz)" +UNDER_REVIEW_PATTERN = r".*\/(.*)\/(.*)\/under_review\/(.*\.nii\.gz)" BRAINMASK_PATTERN = r".*\/(.*)\/(.*)\/brainMask_fused.nii.gz" diff --git a/scripts/monitor/rano_monitor/dataset_browser.py b/scripts/monitor/rano_monitor/dataset_browser.py index d08618b08..3f75a092f 100644 --- a/scripts/monitor/rano_monitor/dataset_browser.py +++ b/scripts/monitor/rano_monitor/dataset_browser.py @@ -11,7 +11,7 @@ from rano_monitor.widgets.summary import Summary from textual.app import App, ComposeResult from textual.binding import Binding -from textual.containers import Container, Horizontal +from textual.containers import Container, Horizontal, VerticalScroll from textual.reactive import reactive, var from textual.widgets import ( Button, @@ -42,7 +42,6 @@ def set_vars( self, dset_data_path, stages_path, - reviewed_watchdog, output_path, invalid_path, invalid_watchdog, @@ -50,7 +49,6 @@ def set_vars( ): self.dset_data_path = dset_data_path self.stages_path = stages_path - self.reviewed_watchdog = reviewed_watchdog self.output_path = output_path self.invalid_path = invalid_path self.invalid_watchdog = invalid_watchdog @@ -65,8 +63,9 @@ def compose(self) -> ComposeResult: with Container(): with Container(id="list-container"): yield SubjectListView(id="subjects-list") - yield Summary(id="summary") - yield SubjectDetails(id="details") + with 
VerticalScroll(): + yield Summary(id="summary") + yield SubjectDetails(id="details") with Container(id="confirm-prompt"): yield Static(self.prompt, id="confirm-details") yield Horizontal( diff --git a/scripts/monitor/rano_monitor/handlers/__init__.py b/scripts/monitor/rano_monitor/handlers/__init__.py index 44e4dafee..1659d7c07 100644 --- a/scripts/monitor/rano_monitor/handlers/__init__.py +++ b/scripts/monitor/rano_monitor/handlers/__init__.py @@ -1,7 +1,6 @@ from .invalid_handler import InvalidHandler from .prompt_handler import PromptHandler from .report_handler import ReportHandler, ReportState -from .reviewed_handler import ReviewedHandler from .tarball_reviewed_watchdog import TarballReviewedHandler __all__ = [ @@ -9,6 +8,5 @@ PromptHandler, ReportHandler, ReportState, - ReviewedHandler, TarballReviewedHandler, ] diff --git a/scripts/monitor/rano_monitor/handlers/reviewed_handler.py b/scripts/monitor/rano_monitor/handlers/reviewed_handler.py deleted file mode 100644 index 404072012..000000000 --- a/scripts/monitor/rano_monitor/handlers/reviewed_handler.py +++ /dev/null @@ -1,115 +0,0 @@ -import os -import re -import tarfile - -from rano_monitor.constants import ( - BRAINMASK_PATTERN, - REVIEW_FILENAME, - REVIEWED_PATTERN, -) -from rano_monitor.utils import delete -from watchdog.events import FileSystemEventHandler - - -def get_tar_identified_masks(file): - identified_reviewed = [] - identified_brainmasks = [] - try: - with tarfile.open(file, "r") as tar: - for member in tar.getmembers(): - review_match = re.match(REVIEWED_PATTERN, member.name) - if review_match: - identified_reviewed.append(review_match) - - brainmask_match = re.match(BRAINMASK_PATTERN, member.name) - if brainmask_match: - identified_brainmasks.append(brainmask_match) - except Exception: - return [], [] - - return identified_reviewed, identified_brainmasks - - -def get_identified_extract_paths( - identified_reviewed, - identified_brainmasks, - dset_data_path -): - extracts = [] - for reviewed 
in identified_reviewed: - id, tp, filename = reviewed.groups() - src_path = reviewed.group(0) - dest_path = os.path.join( - dset_data_path, - "tumor_extracted", - "DataForQC", - id, - tp, - "TumorMasksForQC", - "finalized", - ) - if not os.path.exists(dest_path): - # Don't try to add reviewed file if the dest path - # doesn't exist - continue - - # dest_path = os.path.join(dest_path, filename) - extracts.append((src_path, dest_path)) - - for mask in identified_brainmasks: - id, tp = mask.groups() - src_path = mask.group(0) - dest_path = os.path.join( - dset_data_path, - "tumor_extracted", - "DataForQC", - id, - tp, - ) - extracts.append((src_path, dest_path)) - - return extracts - - -class ReviewedHandler(FileSystemEventHandler): - def __init__(self, dset_data_path: str, textual_app): - self.dset_data_path = dset_data_path - self.app = textual_app - self.ext = ".tar.gz" - - for file in os.listdir("."): - if file.endswith(self.ext): - self.move_assets(file) - - def on_modified(self, event): - if os.path.basename(event.src_path) == REVIEW_FILENAME: - return - if event.src_path.endswith(self.ext): - self.move_assets(event.src_path) - - def move_assets(self, file): - identified_masks = get_tar_identified_masks(file) - identified_reviewed, identified_brainmasks = identified_masks - - if len(identified_reviewed): - self.app.notify("Reviewed cases identified") - - if len(identified_brainmasks): - self.app.notify("Brain masks identified") - - extracts = get_identified_extract_paths( - identified_reviewed, - identified_brainmasks, - self.dset_data_path - ) - - with tarfile.open(file, "r") as tar: - for src, dest in extracts: - member = tar.getmember(src) - member.name = os.path.basename(member.name) - target_file = os.path.join(dest, member.name) - # TODO: this might be problematic UX. 
- # The brainmask might get overwritten unknowingly - if os.path.exists(target_file): - delete(target_file, self.dset_data_path) - tar.extract(member, dest) diff --git a/scripts/monitor/rano_monitor/tarball_browser.py b/scripts/monitor/rano_monitor/tarball_browser.py index c5ef2cf1b..4c81aaa60 100644 --- a/scripts/monitor/rano_monitor/tarball_browser.py +++ b/scripts/monitor/rano_monitor/tarball_browser.py @@ -50,10 +50,12 @@ def set_vars(self, contents_path): def __get_subjects(self): subjects = os.listdir(self.contents_path) + subjects = [subject for subject in subjects if not subject.startswith(".")] subject_timepoint_list = [] for subject in subjects: subject_path = os.path.join(self.contents_path, subject) timepoints = os.listdir(subject_path) + timepoints = [timepoint for timepoint in timepoints if not timepoint.startswith(".")] subject_timepoint_list += [(subject, tp) for tp in timepoints] return subject_timepoint_list diff --git a/scripts/monitor/rano_monitor/utils.py b/scripts/monitor/rano_monitor/utils.py index 0c4f246e1..e0e56d30a 100644 --- a/scripts/monitor/rano_monitor/utils.py +++ b/scripts/monitor/rano_monitor/utils.py @@ -1,4 +1,5 @@ import hashlib +import re import os import shutil import tarfile @@ -14,7 +15,10 @@ BRAINMASK, MANUAL_REVIEW_STAGE, DONE_STAGE, - REVIEW_FILENAME + REVIEW_FILENAME, + REVIEWED_PATTERN, + UNDER_REVIEW_PATTERN, + BRAINMASK_PATTERN, ) @@ -61,7 +65,7 @@ def review_tumor(subject: str, data_path: str, labels_path: str): if not is_nifti and not is_under_review: shutil.copyfile(seg_file, under_review_file) - run_editor(t1c_file, t2f_file, t2w_file, t1n_file, seg_file, label_file) + run_editor(t1c_file, t2f_file, t2w_file, t1n_file, under_review_file, label_file) def review_brain(subject, labels_path, data_path=None): @@ -221,11 +225,7 @@ def package_review_cases(report: pd.DataFrame, dset_path: str): for i, row in review_cases.iterrows(): data_path = to_local_path(row["data_path"], dset_path) labels_path = 
to_local_path(row["labels_path"], dset_path) - brainscans = get_tumor_review_paths( - row.name, - data_path, - labels_path - )[:-2] + brainscans = get_tumor_review_paths(row.name, data_path, labels_path)[:-2] rawscans = get_brain_review_paths(row.name, labels_path)[:-1] base_path = os.path.join(labels_path, "..") @@ -239,12 +239,7 @@ def package_review_cases(report: pd.DataFrame, dset_path: str): tar.addfile(reviewed_dir) tar.add(labels_path, tar_path) - brainscan_path = os.path.join( - "review_cases", - id, - tp, - "brain_scans" - ) + brainscan_path = os.path.join("review_cases", id, tp, "brain_scans") for brainscan in brainscans: brainscan_target_path = os.path.join( brainscan_path, os.path.basename(brainscan) @@ -273,3 +268,122 @@ def package_review_cases(report: pd.DataFrame, dset_path: str): img_path = os.path.join(base_path, file) img_tar_path = os.path.join(tar_path, file) tar.add(img_path, img_tar_path) + + +def get_tar_identified_masks(file): + identified_reviewed = [] + identified_under_review = [] + identified_brainmasks = [] + try: + with tarfile.open(file, "r") as tar: + for member in tar.getmembers(): + review_match = re.match(REVIEWED_PATTERN, member.name) + if review_match: + identified_reviewed.append(review_match) + + under_review_match = re.match(UNDER_REVIEW_PATTERN, member.name) + if under_review_match: + identified_under_review.append(under_review_match) + + brainmask_match = re.match(BRAINMASK_PATTERN, member.name) + if brainmask_match: + identified_brainmasks.append(brainmask_match) + except Exception: + return [], [], [] + + return identified_reviewed, identified_under_review, identified_brainmasks + + +def get_identified_extract_paths( + identified_reviewed, identified_under_review, identified_brainmasks, dset_data_path +): + extracts = [] + for reviewed in identified_reviewed: + id, tp, filename = reviewed.groups() + src_path = reviewed.group(0) + dest_path = os.path.join( + dset_data_path, + "tumor_extracted", + "DataForQC", + id, + tp, 
+ "TumorMasksForQC", + "finalized", + ) + if not os.path.exists(dest_path): + # Don't try to add reviewed file if the dest path + # doesn't exist + continue + + # dest_path = os.path.join(dest_path, filename) + extracts.append((src_path, dest_path)) + + for under_review in identified_under_review: + id, tp, filename = under_review.groups() + src_path = under_review.group(0) + dest_path = os.path.join( + dset_data_path, + "tumor_extracted", + "DataForQC", + id, + tp, + "TumorMasksForQC", + "under_review", + ) + if not os.path.exists(dest_path): + # Don't try to add reviewed file if the dest path + # doesn't exist + continue + + # dest_path = os.path.join(dest_path, filename) + extracts.append((src_path, dest_path)) + + for mask in identified_brainmasks: + id, tp = mask.groups() + src_path = mask.group(0) + dest_path = os.path.join( + dset_data_path, + "tumor_extracted", + "DataForQC", + id, + tp, + ) + if not os.path.exists(dest_path): + # Don't try to add reviewed file if the dest path + # doesn't exist + continue + + extracts.append((src_path, dest_path)) + + return extracts + + +def unpackage_reviews(file, app, dset_data_path): + identified_masks = get_tar_identified_masks(file) + identified_reviewed, identified_under_review, identified_brainmasks = ( + identified_masks + ) + + if len(identified_reviewed): + app.notify("Reviewed cases identified") + + if len(identified_brainmasks): + app.notify("Brain masks identified") + + extracts = get_identified_extract_paths( + identified_reviewed, + identified_under_review, + identified_brainmasks, + dset_data_path, + ) + + with tarfile.open(file, "r") as tar: + for src, dest in extracts: + member = tar.getmember(src) + member.name = os.path.basename(member.name) + target_file = os.path.join(dest, member.name) + # TODO: this might be problematic UX. 
+ # The brainmask might get overwritten unknowingly + if os.path.exists(target_file): + delete(target_file, dset_data_path) + tar.extract(member, dest) diff --git a/scripts/monitor/rano_monitor/widgets/summary.py b/scripts/monitor/rano_monitor/widgets/summary.py index 36cf5e7ef..9f3f2e7b1 100644 --- a/scripts/monitor/rano_monitor/widgets/summary.py +++ b/scripts/monitor/rano_monitor/widgets/summary.py @@ -1,9 +1,9 @@ +import os import pandas as pd -from rano_monitor.constants import REVIEW_FILENAME -from rano_monitor.handlers.reviewed_handler import ReviewedHandler +from rano_monitor.constants import REVIEW_FILENAME, REVIEWED_FILENAME from rano_monitor.messages import InvalidSubjectsUpdated from rano_monitor.messages import ReportUpdated -from rano_monitor.utils import package_review_cases +from rano_monitor.utils import package_review_cases, unpackage_reviews from textual.app import ComposeResult from textual.containers import Center from textual.widgets import ( @@ -24,11 +24,13 @@ class Summary(Static): def compose(self) -> ComposeResult: yield Static("Report Status") yield Center(id="summary-content") - with Center(): - yield Button("package cases for review", id="package-btn") - - def set_reviewed_watchdog(self, reviewed_watchdog: ReviewedHandler): - self.reviewed_watchdog = reviewed_watchdog + with Center(id="package-btns"): + yield Button( + "package cases for review", classes="review-btn", id="package-btn" + ) + yield Button( + "Load reviewed_cases.tar.gz", classes="review-btn", id="unpackage-btn" + ) def on_report_updated(self, message: ReportUpdated) -> None: report = message.report @@ -38,10 +40,7 @@ def on_report_updated(self, message: ReportUpdated) -> None: self.report = report_df self.update_summary() - def on_invalid_subjects_updated( - self, - message: InvalidSubjectsUpdated - ) -> None: + def on_invalid_subjects_updated(self, message: InvalidSubjectsUpdated) -> None: self.invalid_subjects = message.invalid_subjects self.update_summary() @@ -49,7 
+48,7 @@ def update_summary(self): report_df = self.report if report_df.empty: return - package_btn = self.query_one("#package-btn", Button) + package_btns = self.query_one("#package-btns", Center) # Generate progress bars for all states display_report_df = report_df.copy(deep=True) display_report_df.loc[list(self.invalid_subjects), "status_name"] = ( @@ -61,7 +60,7 @@ def update_summary(self): # Attach status_percents["DONE"] = 0.0 - package_btn.display = "MANUAL_REVIEW_REQUIRED" in status_percents + package_btns.display = "MANUAL_REVIEW_REQUIRED" in status_percents widgets = [] for name, val in status_percents.items(): @@ -80,5 +79,15 @@ def update_summary(self): def on_button_pressed(self, event: Button.Pressed) -> None: event.stop() - package_review_cases(self.report, self.dset_path) - self.notify(f"{REVIEW_FILENAME} was created on the working directory") + pkg_btn = self.query_one("#package-btn", Button) + unpkg_btn = self.query_one("#unpackage-btn", Button) + + if event.control == pkg_btn: + package_review_cases(self.report, self.dset_path) + self.notify(f"{REVIEW_FILENAME} was created on the working directory") + elif event.control == unpkg_btn: + if REVIEWED_FILENAME not in os.listdir("."): + self.notify(f"{REVIEWED_FILENAME} not found in {os.path.abspath('.')}") + return + + unpackage_reviews(REVIEWED_FILENAME, self, self.dset_path) diff --git a/scripts/monitor/rano_monitor/widgets/tarball_subject_view.py b/scripts/monitor/rano_monitor/widgets/tarball_subject_view.py index cf6f2cefa..8f939c20a 100644 --- a/scripts/monitor/rano_monitor/widgets/tarball_subject_view.py +++ b/scripts/monitor/rano_monitor/widgets/tarball_subject_view.py @@ -146,5 +146,6 @@ def __tumor_has_been_finalized(self): "finalized" ) finalized_files = os.listdir(finalized_tumor_path) + finalized_files = [file for file in finalized_files if not file.startswith(".")] return len(finalized_files) > 0