diff --git a/.github/workflows/test-spras.yml b/.github/workflows/test-spras.yml
index a4d5886b..0f68d83a 100644
--- a/.github/workflows/test-spras.yml
+++ b/.github/workflows/test-spras.yml
@@ -84,6 +84,7 @@ jobs:
         docker pull reedcompbio/allpairs:v2
         docker pull reedcompbio/domino:latest
         docker pull reedcompbio/py4cytoscape:v2
+        docker pull reedcompbio/spras:v0.1.0
     - name: Build Omics Integrator 1 Docker image
       uses: docker/build-push-action@v1
       with:
@@ -156,6 +157,15 @@ jobs:
         tags: v2
         cache_froms: reedcompbio/py4cytoscape:latest
         push: false
+    - name: Build SPRAS Docker image
+      uses: docker/build-push-action@v1
+      with:
+        path: .
+        dockerfile: docker-wrappers/SPRAS/Dockerfile
+        repository: reedcompbio/spras
+        tags: v0.1.0
+        cache_froms: reedcompbio/spras:v0.1.0
+        push: false

 # Run pre-commit checks on source files
 pre-commit:
@@ -167,6 +177,6 @@ jobs:
     - name: Setup Python
       uses: actions/setup-python@v4
       with:
-        python-version: '3.8' # Match this to the version specified in environment.yml
+        python-version: '3.11' # Match this to the version specified in environment.yml
     - name: Run pre-commit checks
       uses: pre-commit/action@v3.0.0
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 55503ef4..67958453 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,7 +3,7 @@
 # See https://pre-commit.com/ for documentation
 default_language_version:
   # Match this to the version specified in environment.yml
-  python: python3.8
+  python: python3.11
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v4.4.0 # Use the ref you want to point at
diff --git a/Snakefile b/Snakefile
index 6a9b513d..71a8a6ed 100644
--- a/Snakefile
+++ b/Snakefile
@@ -219,7 +219,7 @@ rule reconstruct:
         # Create a copy so that the updates are not written to the parameters logfile
         params = reconstruction_params(wildcards.algorithm, wildcards.params).copy()
         # Add the input files
-        params.update(dict(zip(runner.get_required_inputs(wildcards.algorithm), *{input})))
+        params.update(dict(zip(runner.get_required_inputs(wildcards.algorithm), *{input}, strict=True)))
         # Add the output file
         # All run functions can accept a relative path to the output file that should be written that is called 'output_file'
         params['output_file'] = output.pathway_file
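The `strict=True` flag added here (available since Python 3.10, so the jump to 3.11 above guarantees it) turns a silent truncation into an immediate error whenever an algorithm's required inputs and the rule's inputs fall out of sync. A minimal sketch of the difference, using hypothetical input names rather than SPRAS's real ones:

```
# Hypothetical names, only to illustrate the zip(strict=True) change above
required = ['nodetypes', 'network']
provided = ['sample-in-nodetypes.txt']  # one input accidentally missing

# Without strict=True the mismatch is silently truncated
print(dict(zip(required, provided)))  # {'nodetypes': 'sample-in-nodetypes.txt'}

# With strict=True the mismatch raises instead of producing a partial mapping
try:
    dict(zip(required, provided, strict=True))
except ValueError as err:
    print(err)  # zip() argument 2 is shorter than argument 1
```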
diff --git a/config/config.yaml b/config/config.yaml
index 5fe6083b..741d8ca9 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -7,6 +7,14 @@
 hash_length: 7
 # 'singularity'. If container_framework is not specified, SPRAS will default to docker.
 container_framework: docker
+# Only used if container_framework is set to singularity: when true, unpack the singularity containers
+# to the local filesystem. This is useful when PRM containers need to run inside another container,
+# such as would be the case in an HTCondor/OSPool environment.
+# NOTE: This unpacks singularity containers to the local filesystem, which takes up space that
+# persists after the workflow is complete. To clean up the unpacked containers, the user must
+# manually delete them.
+unpack_singularity: false
+
 # Allow the user to configure which container registry containers should be pulled from
 # Note that this assumes container names are consistent across registries, and that the
 # registry being passed doesn't require authentication for pull actions
diff --git a/docker-wrappers/SPRAS/Dockerfile b/docker-wrappers/SPRAS/Dockerfile
new file mode 100644
index 00000000..5a721a07
--- /dev/null
+++ b/docker-wrappers/SPRAS/Dockerfile
@@ -0,0 +1,16 @@
+FROM almalinux:9
+
+RUN dnf install -y epel-release
+
+# gcc/g++ are required for building several of the packages if you're using Apple Silicon
+RUN dnf update -y && \
+    dnf install -y gcc gcc-c++ \
+    python3.11 python3.11-pip python3.11-devel \
+    docker apptainer
+
+COPY / /spras/
+RUN chmod -R 777 /spras
+WORKDIR /spras
+
+# Install spras into the container
+RUN pip3.11 install .
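Once an image is built from this Dockerfile (the README below covers the build command), a quick smoke test can confirm that `snakemake` landed on the container's `PATH`. A sketch using the `docker` Python package SPRAS already depends on; the tag is an assumption, so substitute whatever you built:

```
# Smoke test for a locally built image; the tag below is an assumption
import docker

client = docker.from_env()
logs = client.containers.run('reedcompbio/spras:v0.1.0', 'snakemake --version', remove=True)
print(logs.decode().strip())  # should print the pinned snakemake version
```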
diff --git a/docker-wrappers/SPRAS/README.md b/docker-wrappers/SPRAS/README.md
new file mode 100644
index 00000000..6187a553
--- /dev/null
+++ b/docker-wrappers/SPRAS/README.md
@@ -0,0 +1,81 @@
+# SPRAS Docker image
+
+## Building
+
+A Docker image for SPRAS is available on [DockerHub](https://hub.docker.com/repository/docker/reedcompbio/spras).
+This image comes bundled with all of the necessary software packages to run SPRAS, and can be used for execution in distributed environments (like HTCondor).
+
+To create the Docker image, make sure you are in this repository's root directory, and from your terminal run:
+
+```
+docker build -t <project>/<image>:<tag> -f docker-wrappers/SPRAS/Dockerfile .
+```
+
+For example, to build this image with the intent of pushing it to DockerHub as reedcompbio/spras:v0.1.0, you'd run:
+
+```
+docker build -t reedcompbio/spras:v0.1.0 -f docker-wrappers/SPRAS/Dockerfile .
+```
+
+This will copy the entire SPRAS repository into the container and install SPRAS with `pip`. As such, any changes you've made to the current SPRAS repository will be reflected in the version of SPRAS installed in the container. Since SPRAS is being installed with `pip`, it's also possible to specify that you want the optional development modules installed. If you're using the container for development and you want the `pre-commit` and `pytest` packages as well as a spras package that receives changes without re-installation, change the `pip` installation line to:
+
+```
+pip install -e .[dev]
+```
+
+This will cause changes to the spras source code to update the installed package.
+
+**Note:** This image will build for the same platform that is native to your system (i.e. amd64 or arm64). If you need to run this in a remote environment like HTCondor that is almost certainly `amd64` but you're building from Apple Silicon, it is recommended to either modify the Dockerfile to pin the platform:
+
+```
+FROM --platform=linux/amd64 almalinux:9
+```
+
+Or to temporarily override your system's default during the build, prepend your build command with:
+
+```
+DOCKER_DEFAULT_PLATFORM=linux/amd64
+```
+
+For example, to build reedcompbio/spras:v0.1.0 on Apple Silicon as a linux/amd64 container, you'd run:
+
+```
+DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build -t reedcompbio/spras:v0.1.0 -f docker-wrappers/SPRAS/Dockerfile .
+```
+
+## Testing
+
+The folder `docker-wrappers/SPRAS` also contains several files that can be used to test this container on HTCondor. To test the `spras` container
+in this environment, first login to an HTCondor Access Point (AP). Then, from the AP, clone this repo:
+
+```
+git clone https://github.com/Reed-CompBio/spras.git
+```
+
+When you're ready to run SPRAS as an HTCondor workflow, navigate to the `spras/docker-wrappers/SPRAS` directory and create the `logs/` directory. Then run
+`condor_submit spras.sub`, which will submit SPRAS to HTCondor as a single job with as many cores as indicated by the `NUM_PROCS` line in `spras.sub`, using
+the value of `CONFIG_FILE` as the SPRAS configuration file. Note that you can alter the configuration file to test various workflows, but you should leave
+`unpack_singularity: true`, or it is likely the job will be unsuccessful. By default, the `example_config.yaml` runs everything except for `cytoscape`, which
+appears to fail periodically in HTCondor.
+
+To monitor the state of the job, you can run `condor_q` for a snapshot of how the job is doing, or you can run `condor_watch_q` if you want real-time updates.
+Upon completion, the `output` directory from the workflow should be returned as `spras/docker-wrappers/SPRAS/output`, along with several files containing the
+workflow's logging information (anything that matches `logs/spras_*` and ends in `.out`, `.err`, or `.log`). If the job was unsuccessful, these files should
+contain useful debugging clues about what may have gone wrong.
+
+**Note**: If you want to run the workflow with a different version of SPRAS, or one that contains development updates you've made, rebuild this image against
+the version of SPRAS you want to test, and push the image to your image repository. To use that container in the workflow, change the `container_image` line of
+`spras.sub` to point to the new image.
+
+**Note**: In some cases, especially if you're encountering an error like `/srv//spras.sh: line 10: snakemake: command not found`, it may be necessary to convert
+the SPRAS image to a `.sif` container image before running someplace like the OSPool. To do this, run:
+
+```
+apptainer build spras.sif docker://reedcompbio/spras:v0.1.0
+```
+
+to produce the file `spras.sif`. Then, substitute this value as the `container_image` in the submit file.
+
+## Versions:
+
+The versions of this image match the version of the spras package within it.
+- v0.1.0: Created an image with SPRAS as an installed python module. This makes SPRAS runnable anywhere with Docker/Singularity. Note that the Snakefile should be
+  runnable from any directory within the container.
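The `.sif` conversion described in the README can also be scripted with `spython`, the Singularity client SPRAS already uses internally (see the `spras/containers.py` change below). A sketch, assuming `apptainer` or `singularity` is installed locally:

```
# Python equivalent of the `apptainer build` command in the README above
from spython.main import Client

sif_path = Client.pull('docker://reedcompbio/spras:v0.1.0', name='spras.sif')
print(sif_path)  # local path to spras.sif
```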
diff --git a/docker-wrappers/SPRAS/example_config.yaml b/docker-wrappers/SPRAS/example_config.yaml
new file mode 100644
index 00000000..8b9c1edb
--- /dev/null
+++ b/docker-wrappers/SPRAS/example_config.yaml
@@ -0,0 +1,151 @@
+# Global workflow control
+
+# The length of the hash used to identify a parameter combination
+hash_length: 7
+
+# Specify the container framework. Current supported versions include 'docker' and
+# 'singularity'. If container_framework is not specified, SPRAS will default to docker.
+container_framework: singularity
+
+# Unpack singularity. See config/config.yaml for details.
+unpack_singularity: true
+
+# Allow the user to configure which container registry containers should be pulled from
+# Note that this assumes container names are consistent across registries, and that the
+# registry being passed doesn't require authentication for pull actions
+container_registry:
+  base_url: docker.io
+  # The owner or project of the registry
+  # For example, "reedcompbio" if the image is available as docker.io/reedcompbio/allpairs
+  owner: reedcompbio
+
+# This list of algorithms should be generated by a script which checks the filesystem for installs.
+# It shouldn't be changed by mere mortals. (alternatively, we could add a path to executable for each algorithm
+# in the list to reduce the number of assumptions of the program at the cost of making the config a little more involved)
+# Each algorithm has an 'include' parameter. By toggling 'include' to true/false the user can change
+# which algorithms are run in a given experiment.
+#
+# algorithm-specific parameters are embedded in lists so that users can specify multiple. If multiple
+# parameters are specified then the algorithm will be run as many times as needed to cover all parameter
+# combinations. For instance if we have the following:
+# - name: "myAlg"
+#   params:
+#     include: true
+#     a: [1,2]
+#     b: [0.5,0.75]
+#
+# then myAlg will be run on (a=1,b=0.5), (a=1,b=0.75), (a=2,b=0.5), and (a=2,b=0.75). Pretty neat, but be
+# careful: too many parameters might make your runs take a long time.

+algorithms:
+  - name: "pathlinker"
+    params:
+      include: false
+      run1:
+        k: range(100,201,100)
+
+  - name: "omicsintegrator1"
+    params:
+      include: true
+      run1:
+        r: [5]
+        b: [5, 6]
+        w: np.linspace(0,5,2)
+        g: [3]
+        d: [10]
+
+  - name: "omicsintegrator2"
+    params:
+      include: true
+      run1:
+        b: [4]
+        g: [0]
+      run2:
+        b: [2]
+        g: [3]
+
+  - name: "meo"
+    params:
+      include: true
+      run1:
+        max_path_length: [3]
+        local_search: ["Yes"]
+        rand_restarts: [10]
+
+  - name: "mincostflow"
+    params:
+      include: true
+      run1:
+        flow: [1] # The flow must be an int
+        capacity: [1]
+
+  - name: "allpairs"
+    params:
+      include: true
+
+  - name: "domino"
+    params:
+      include: true
+      run1:
+        slice_threshold: [0.3]
+        module_threshold: [0.05]
+
+
+# Here we specify which pathways to run and other file location information.
+# DataLoader.py can currently only load a single dataset
+# Assume that if a dataset label does not change, the lists of associated input files do not change
+datasets:
+  -
+    label: data0
+    node_files: ["node-prizes.txt", "sources.txt", "targets.txt"]
+    # DataLoader.py can currently only load a single edge file, which is the primary network
+    edge_files: ["network.txt"]
+    # Placeholder
+    other_files: []
+    # Relative path from the spras directory
+    data_dir: "input"
+  # -
+  #   label: data1
+  #   # Reuse the same sources file as 'data0' but a different network and targets
+  #   node_files: ["node-prizes.txt", "sources.txt", "alternative-targets.txt"]
+  #   edge_files: ["alternative-network.txt"]
+  #   other_files: []
+  #   # Relative path from the spras directory
+  #   data_dir: "input"
+
+# If we want to reconstruct then we should set run to true.
+# TODO: if include is true above but run is false here, algs are not run.
+# is this the behavior we want?
+reconstruction_settings:
+
+  # set where everything is saved
+  locations:
+
+    # place the save path here
+    # TODO move to global
+    reconstruction_dir: "output"
+
+  run: true
+
+analysis:
+  # Create one summary per pathway file and a single summary table for all pathways for each dataset
+  summary:
+    include: true
+  # Create output files for each pathway that can be visualized with GraphSpace
+  graphspace:
+    include: true
+  # Create Cytoscape session file with all pathway graphs for each dataset
+  cytoscape:
+    include: false
+  # Machine learning analysis (e.g. clustering) of the pathway output files for each dataset
+  ml:
+    include: true
+    # specify how many principal components to calculate
+    components: 2
+    # boolean to show the labels on the pca graph
+    labels: true
+    # 'ward', 'complete', 'average', 'single'
+    # if linkage: ward, must use metric: euclidean
+    linkage: 'ward'
+    # 'euclidean', 'manhattan', 'cosine'
+    metric: 'euclidean'
diff --git a/docker-wrappers/SPRAS/spras.sh b/docker-wrappers/SPRAS/spras.sh
new file mode 100755
index 00000000..cdfb924c
--- /dev/null
+++ b/docker-wrappers/SPRAS/spras.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Fail early if there's an issue
+set -e
+
+# When .cache files are created, they need to know where HOME is to write there.
+# In this case, that should be the HTCondor scratch dir the job is executing in.
+export HOME=$(pwd)
+
+snakemake "$@"
diff --git a/docker-wrappers/SPRAS/spras.sub b/docker-wrappers/SPRAS/spras.sub
new file mode 100644
index 00000000..b89945e7
--- /dev/null
+++ b/docker-wrappers/SPRAS/spras.sub
@@ -0,0 +1,86 @@
+############################################################
+# A submit file to demonstrate running SPRAS in the OSPool #
+############################################################
+
+############################################################
+# Define a few macros we use throughout the submit file    #
+############################################################
+CONFIG_FILE = example_config.yaml
+NUM_PROCS = 4
+# Paths to input data and Snakefile.
+INPUT_DIR = ../../input
+SNAKEFILE = ../../Snakefile
+
+############################################################
+# Specify that the workflow should run in the SPRAS        #
+# container. In the OSPool, this image is usually          #
+# converted automatically to an Apptainer/Singularity      #
+# image, which is why the example config has               #
+# `unpack_singularity: true`.                              #
+############################################################
+universe = container
+container_image = docker://reedcompbio/spras:v0.1.0
+# container_image = spras.sif
+
+
+############################################################
+# Specify names for log/stdout/stderr files generated by   #
+# HTCondor.                                                #
+# NOTE: You should `mkdir logs/` before running, or the    #
+# spras_$(Cluster).log file won't be available.            #
+############################################################
+log = logs/spras_$(Cluster)_$(Process).log
+output = logs/spras_$(Cluster)_$(Process).out
+error = logs/spras_$(Cluster)_$(Process).err
+
+############################################################
+# Specify the script to run inside the container. This is  #
+# simply a wrapper on the Snakefile.                       #
# +############################################################ +executable = spras.sh +arguments = "--cores $(NUM_PROCS) --configfile $(CONFIG_FILE) --retries 3" + +############################################################ +# Handle transferring required inputs/outputs # +############################################################ +should_transfer_files = YES +when_to_transfer_output = ON_EXIT +transfer_input_files = $(CONFIG_FILE), $(INPUT_DIR), $(SNAKEFILE) +# The output directory should match whatever you configure in your configfile. +transfer_output_files = output + +############################################################ +# System specifications. Be sure to request enough disk to # +# hold any additional containers that might be downloaded # +# and unpacked as part of the workflow. # +############################################################ +request_cpus = $(NUM_PROCS) +request_memory = 8GB +request_disk = 16GB + +############################################################ +# Specify a batch name that we can use to identify the # +# workflow via `condor_q`. # +############################################################ +JobBatchName = "SPRAS-workflow-OSPool" + +############################################################ +# Indicate that we want to run in the OSPool. This is only # +# needed if running from CHTC. If running from an OSPool # +# AP, omit this line. # +############################################################ ++WantGlideIn = true + +############################################################ +# Not all Execution Points in the OSPool will have # +# Apptainer (formerly Singularity) installed, but this is # +# a requirement to run SPRAS (since the OSPool is not # +# Docker friendly). To make sure we land somewhere with # +# Apptainer, we add it as a job requirement. If running # +# this submit file from CHTC, we also need a requirement # +# to prevent landing on a CHTC Execution Point. 
# +############################################################ +requirements = (HAS_SINGULARITY == True) && (Poolname =!= "CHTC") + +# Queue the job +queue 1 \ No newline at end of file diff --git a/environment.yml b/environment.yml index 75546ef0..bcbb69c0 100644 --- a/environment.yml +++ b/environment.yml @@ -3,14 +3,15 @@ channels: - conda-forge dependencies: - adjusttext=0.7.3.1 - - bioconda::snakemake-minimal=7.19.1 + - bioconda::snakemake-minimal=8.11.6 - docker-py=5.0 - - matplotlib=3.5 + - matplotlib=3.6 - networkx=2.8 - - pandas=1.4 + - pandas=1.5 + - numpy=1.26.4 - pre-commit=2.20 # Only required for development - - pytest=7.1 # Only required for development - - python=3.8 + - pytest=8.0 # Only required for development + - python=3.11 - pip=22.1 - requests=2.28 - scikit-learn=1.2 @@ -18,11 +19,11 @@ dependencies: - spython=0.2 # Only required for GraphSpace - commonmark=0.9 - - docutils=0.18 + - docutils=0.19 - jinja2=3.1 - mock=4.0 - recommonmark=0.7 - - sphinx=5.0 + - sphinx=6.0 - pip: - graphspace_python==1.3.1 - - sphinx-rtd-theme==1.2.0 + - sphinx-rtd-theme==2.0.0 diff --git a/pyproject.toml b/pyproject.toml index 5962fc3c..68d10f5c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "spras" -version = "0.0.1" +version = "0.1.0" description = "Signaling Pathway Reconstruction Analysis Streamliner" authors = [ { name = "Anthony Gitter", email = "gitter@biostat.wisc.edu" }, @@ -16,14 +16,16 @@ classifiers = [ "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Bio-Informatics", ] -requires-python = ">=3.8" +requires-python = ">=3.11" dependencies = [ "adjusttext==0.7.3", - "snakemake==7.19.1", + # A bug was introduced in older versions of snakemake that prevent it from running. Update to fix + "snakemake==8.11.6", "docker==5.0.3", # Switched from docker-py to docker because docker-py is not maintained in pypi. This appears to have no effect - "matplotlib==3.5", + "matplotlib==3.6", "networkx==2.8", - "pandas==1.4", + "pandas==1.5", + "numpy==1.26.4", "pip==22.1", "requests==2.28", "scikit-learn==1.2", @@ -31,20 +33,20 @@ dependencies = [ "spython==0.2", # Only required for GraphSpace "commonmark==0.9", - "docutils==0.18", + "docutils==0.19", "jinja2==3.1", "mock==4.0", "recommonmark==0.7", - "sphinx==5.0", + "sphinx==6.0", "graphspace_python==1.3.1", - "sphinx-rtd-theme==1.2.0", + "sphinx-rtd-theme==2.0.0", ] [project.optional-dependencies] dev = [ # Only required for development "pre-commit==2.20", - "pytest==7.1", + "pytest==8.0", ] [project.urls] @@ -56,7 +58,7 @@ requires = ["setuptools>=64.0"] build-backend = "setuptools.build_meta" [tool.ruff] -target-version = "py38" +target-version = "py311" # Autofix errors when possible fix = true # Select categories or specific rules from https://beta.ruff.rs/docs/rules/ diff --git a/spras/analysis/ml.py b/spras/analysis/ml.py index a1571988..a637d7fe 100644 --- a/spras/analysis/ml.py +++ b/spras/analysis/ml.py @@ -100,7 +100,7 @@ def create_palette(column_names): """ # TODO: could add a way for the user to customize the color palette? 
diff --git a/spras/analysis/ml.py b/spras/analysis/ml.py
index a1571988..a637d7fe 100644
--- a/spras/analysis/ml.py
+++ b/spras/analysis/ml.py
@@ -100,7 +100,7 @@ def create_palette(column_names):
     """
     # TODO: could add a way for the user to customize the color palette?
     custom_palette = sns.color_palette("husl", len(column_names))
-    label_color_map = {label: color for label, color in zip(column_names, custom_palette)}
+    label_color_map = {label: color for label, color in zip(column_names, custom_palette, strict=True)}
     return label_color_map
diff --git a/spras/config.py b/spras/config.py
index fdd51165..91676ca5 100644
--- a/spras/config.py
+++ b/spras/config.py
@@ -65,6 +65,8 @@ def __init__(self, raw_config):
         self.container_framework = None
         # The container prefix (host and organization) to use for images. Default is "docker.io/reedcompbio"
         self.container_prefix = DEFAULT_CONTAINER_PREFIX
+        # A Boolean specifying whether to unpack singularity containers. Default is False
+        self.unpack_singularity = False
         # A dictionary to store configured datasets against which SPRAS will be run
         self.datasets = None
         # The hash length SPRAS will use to identify parameter combinations. Default is 7
@@ -114,6 +116,14 @@ def process_config(self, raw_config):
         else:
             self.container_framework = "docker"

+        # Unpack settings for running in singularity mode. Needed when running PRM containers if already in a container.
+        if "unpack_singularity" in raw_config:
+            # The YAML loader already parses true/false into a bool, so the value can be stored directly.
+            unpack_singularity = raw_config["unpack_singularity"]
+            if unpack_singularity and self.container_framework != "singularity":
+                print("Warning: unpack_singularity is set to True, but the container framework is not singularity. This setting will have no effect.")
+            self.unpack_singularity = unpack_singularity
+
         # Grab registry from the config, and if none is provided default to docker
         if "container_registry" in raw_config and raw_config["container_registry"]["base_url"] != "" and raw_config["container_registry"]["owner"] != "":
             self.container_prefix = raw_config["container_registry"]["base_url"] + "/" + raw_config["container_registry"]["owner"]
@@ -179,7 +189,7 @@ def process_config(self, raw_config):
         run_list_tuples = list(it.product(*all_runs))
         param_name_tuple = tuple(param_name_list)
         for r in run_list_tuples:
-            run_dict = dict(zip(param_name_tuple, r))
+            run_dict = dict(zip(param_name_tuple, r, strict=True))
             # TODO temporary workaround for yaml.safe_dump in Snakefile write_parameter_log
             for param, value in run_dict.copy().items():
                 if isinstance(value, np.float64):
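The `it.product` plus `zip(strict=True)` pattern above is what expands the per-run parameter lists from the config (recall the `myAlg` example in `example_config.yaml`) into one dictionary per parameter combination. A standalone sketch of the same expansion:

```
# Standalone sketch of the parameter expansion performed in process_config above
import itertools as it

params = {'a': [1, 2], 'b': [0.5, 0.75]}
param_name_tuple = tuple(params)
for r in it.product(*params.values()):
    print(dict(zip(param_name_tuple, r, strict=True)))
# -> {'a': 1, 'b': 0.5}, {'a': 1, 'b': 0.75}, {'a': 2, 'b': 0.5}, {'a': 2, 'b': 0.75}
```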
diff --git a/spras/containers.py b/spras/containers.py
index bdb18acd..c9523129 100644
--- a/spras/containers.py
+++ b/spras/containers.py
@@ -181,6 +181,7 @@ def run_container_singularity(container: str, command: List[str], volumes: List[
     bind_paths = [f'{prepare_path_docker(src)}:{dest}' for src, dest in volumes]

     # TODO is try/finally needed for Singularity?
+    # To debug a container add the execute arguments: singularity_options=['--debug'], quiet=False
     singularity_options = ['--cleanenv', '--containall', '--pwd', working_dir]
     # Singularity does not allow $HOME to be set as a regular environment variable
     # Capture it and use the special argument instead
@@ -190,12 +191,37 @@ def run_container_singularity(container: str, command: List[str], volumes: List[
     else:
         singularity_options.extend(['--env', environment])

-    # To debug a container add the execute arguments: singularity_options=['--debug'], quiet=False
-    # Adding 'docker://' to the container indicates this is a Docker image Singularity must convert
-    return Client.execute('docker://' + container,
-                          command,
-                          options=singularity_options,
-                          bind=bind_paths)
+    # Handle unpacking the singularity image if needed. Potentially needed for running nested unprivileged containers
+    if config.config.unpack_singularity:
+        # Split the string by "/"
+        path_elements = container.split("/")
+
+        # Get the last element, which will indicate the base container name,
+        # and replace the tag separator ":" so the name is usable as a file name
+        base_cont = path_elements[-1]
+        base_cont = base_cont.replace(":", "_")
+        sif_file = base_cont + ".sif"
+
+        # Adding 'docker://' to the container indicates this is a Docker image Singularity must convert
+        image_path = Client.pull('docker://' + container, name=sif_file)
+
+        # Check if the directory for base_cont already exists. When running concurrent jobs, it's possible
+        # something else has already pulled/unpacked the container.
+        # Here, we expand the sif image from `image_path` to a directory indicated by `base_cont`
+        if not os.path.exists(base_cont):
+            Client.build(recipe=image_path, image=base_cont, sandbox=True, sudo=False)
+
+        # Execute the locally unpacked container.
+        return Client.execute(base_cont,
+                              command,
+                              options=singularity_options,
+                              bind=bind_paths)
+
+    else:
+        # Adding 'docker://' to the container indicates this is a Docker image Singularity must convert
+        return Client.execute('docker://' + container,
+                              command,
+                              options=singularity_options,
+                              bind=bind_paths)

 # Because this is called independently for each file, the same local path can be mounted to multiple volumes
 def prepare_volume(filename: Union[str, PurePath], volume_base: Union[str, PurePath]) -> Tuple[Tuple[PurePath, PurePath], str]:
diff --git a/test/AllPairs/__init__.py b/test/AllPairs/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/AllPairs/test_ap.py b/test/AllPairs/test_ap.py
index b6aab9aa..442b26a7 100644
--- a/test/AllPairs/test_ap.py
+++ b/test/AllPairs/test_ap.py
@@ -54,6 +54,20 @@ def test_allpairs_singularity(self):
                      container_framework="singularity")
         assert out_path.exists()

+    @pytest.mark.skipif(not shutil.which('singularity'), reason='Singularity not found on system')
+    def test_allpairs_singularity_unpacked(self):
+        out_path = Path(OUT_DIR+'sample-out-unpack.txt')
+        out_path.unlink(missing_ok=True)
+        # Indicate via config mechanism that we want to unpack the Singularity container
+        config.config.unpack_singularity = True
+        AllPairs.run(
+            nodetypes=TEST_DIR+'input/sample-in-nodetypes.txt',
+            network=TEST_DIR+'input/sample-in-net.txt',
+            output_file=str(out_path),
+            container_framework="singularity")
+        config.config.unpack_singularity = False
+        assert out_path.exists()
+
     def test_allpairs_correctness(self):
         """
         Tests algorithm correctness of all_pairs_shortest_path.py by using AllPairs.run
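One caveat with the new test: it toggles the global `config.config.unpack_singularity` flag and resets it by hand, so an exception raised between the two assignments would leak the setting into later tests. A fixture-based variant (a sketch, not part of this diff, assuming the same `config` import the test uses) would restore the flag even on failure:

```
# Sketch only; a test that requests this fixture runs with the flag set
# and restored automatically afterwards
import pytest
import spras.config as config

@pytest.fixture
def unpack_singularity():
    config.config.unpack_singularity = True
    yield
    config.config.unpack_singularity = False
```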
diff --git a/test/DOMINO/__init__.py b/test/DOMINO/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/LocalNeighborhood/__init__.py b/test/LocalNeighborhood/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/MEO/__init__.py b/test/MEO/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/MinCostFlow/__init__.py b/test/MinCostFlow/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/OmicsIntegrator1/__init__.py b/test/OmicsIntegrator1/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/OmicsIntegrator2/__init__.py b/test/OmicsIntegrator2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/PathLinker/__init__.py b/test/PathLinker/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/analysis/__init__.py b/test/analysis/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/generate-inputs/__init__.py b/test/generate-inputs/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/interactome/__init__.py b/test/interactome/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/ml/__init__.py b/test/ml/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/parse-outputs/__init__.py b/test/parse-outputs/__init__.py
new file mode 100644
index 00000000..e69de29b
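Finally, the batch of empty `__init__.py` files turns each test directory into a package, which lets pytest import same-named test modules from sibling directories without collisions. With them in place, a single suite can be run programmatically as well as from the CLI; for example:

```
# Equivalent to `pytest -v test/AllPairs/test_ap.py`, run from the repo root
import pytest

raise SystemExit(pytest.main(['-v', 'test/AllPairs/test_ap.py']))
```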