diff --git a/.github/ISSUE_TEMPLATE/new_task.yml b/.github/ISSUE_TEMPLATE/new_task.yml
index 1b275f4227..227357d5a6 100644
--- a/.github/ISSUE_TEMPLATE/new_task.yml
+++ b/.github/ISSUE_TEMPLATE/new_task.yml
@@ -4,7 +4,7 @@ labels: [task]
body:
- type: markdown
attributes:
- value: Thanks for choosing OpenProblems. Please check the [OpenProblems tasks](https://github.com/openproblems-bio/openproblems-v2/issues?q=label%3Atask+) to see whether a similar task has already been created. If you haven't already, please review the documentation on [how to create a new task](https://openproblems.bio/documentation/create_task/).
+ value: Thanks for choosing OpenProblems. Please check the [OpenProblems tasks](https://github.com/openproblems-bio/openproblems/issues?q=label%3Atask+) to see whether a similar task has already been created. If you haven't already, please review the documentation on [how to create a new task](https://openproblems.bio/documentation/create_task/).
- type: textarea
attributes:
label: Task motivation
@@ -16,7 +16,7 @@ body:
- type: textarea
attributes:
label: Proposed ground-truth in datasets
- description: Describe the datasets you plan to use for your task. OpenProblems offers a standard set of datasets (See [“Common datasets”](https://openproblems.bio/documentation/reference/openproblems-v2/src-datasets.html)) which you can peruse through. Explain how these datasets will provide the ground-truth for evaluating the methods implemented in your task. If possible, include references or links to the datasets to facilitate reproducibility.
+ description: Describe the datasets you plan to use for your task. OpenProblems offers a standard set of datasets (See [“Common datasets”](https://openproblems.bio/documentation/reference/openproblems/src-datasets.html)) which you can peruse through. Explain how these datasets will provide the ground-truth for evaluating the methods implemented in your task. If possible, include references or links to the datasets to facilitate reproducibility.
- type: textarea
attributes:
label: Initial set of methods to implement
diff --git a/.github/workflows/release-build.yml b/.github/workflows/release-build.yml
index 7d89fac5cf..93b6f1eabf 100644
--- a/.github/workflows/release-build.yml
+++ b/.github/workflows/release-build.yml
@@ -16,7 +16,6 @@ jobs:
outputs:
component_matrix: ${{ steps.set_matrix.outputs.components }}
- workflow_matrix: ${{ steps.set_matrix.outputs.workflows }}
cache_key: ${{ steps.cache.outputs.cache_key }}
steps:
@@ -56,12 +55,6 @@ jobs:
src: src
format: json
- - id: ns_list_workflows
- uses: viash-io/viash-actions/ns-list@v6
- with:
- src: workflows
- format: json
-
- id: set_matrix
run: |
echo "components=$(jq -c '[ .[] |
@@ -72,14 +65,6 @@ jobs:
}
]' ${{ steps.ns_list_components.outputs.output_file }} )" >> $GITHUB_OUTPUT
- echo "workflows=$(jq -c '[ .[] |
- {
- "name": (.functionality.namespace + "/" + .functionality.name),
-        "main_script": (.info.config | capture("^(?<dir>.*\/)").dir + "/" + .functionality.test_resources[].path),
- "entry": .functionality.test_resources[].entrypoint
- }
- ]' ${{ steps.ns_list_workflows.outputs.output_file }} )" >> $GITHUB_OUTPUT
-
# phase 2
build:
needs: list
@@ -128,56 +113,6 @@ jobs:
###################################3
# phase 3
- integration_test:
- needs: [ build, list ]
- if: "${{ needs.list.outputs.workflow_matrix != '[]' }}"
-
- runs-on: ubuntu-latest
-
- strategy:
- fail-fast: false
- matrix:
- component: ${{ fromJson(needs.list.outputs.workflow_matrix) }}
-
- steps:
- # Remove unnecessary files to free up space. Otherwise, we get 'no space left on device.'
- - uses: data-intuitive/reclaim-the-bytes@v2
-
- - uses: actions/checkout@v4
-
- - uses: viash-io/viash-actions/setup@v6
-
- - uses: nf-core/setup-nextflow@v2.0.0
-
- # build target dir
- # use containers from release branch, hopefully these are available
- - name: Build target dir
- uses: viash-io/viash-actions/ns-build@v6
- with:
- config_mod: ".functionality.version := '${{ github.event.inputs.version_tag }}'"
- parallel: true
-
- # use cache
- - name: Cache resources data
- uses: actions/cache@v4
- timeout-minutes: 5
- with:
- path: resources_test
- key: ${{ needs.list.outputs.cache_key }}
-
- - name: Run integration test
- timeout-minutes: 45
- run: |
- # todo: replace with viash test command
- export NXF_VER=22.04.5
- nextflow run . \
- -main-script "${{ matrix.component.main_script }}" \
- -entry ${{ matrix.component.entry }} \
- -profile docker,mount_temp,no_publish \
- -c workflows/utils/labels_ci.config
-
- ###################################3
- # phase 4
component_test:
needs: [ build, list ]
if: ${{ needs.list.outputs.component_matrix != '[]' && needs.list.outputs.component_matrix != '' }}
@@ -208,4 +143,4 @@ jobs:
--config_mod ".functionality.version := '${{ github.event.inputs.version_tag }}'" \
"${{ matrix.component.config }}" \
--cpus 2 \
- --memory "5gb"
\ No newline at end of file
+ --memory "5gb"
diff --git a/.gitignore b/.gitignore
index c19f926ba4..4e328a4901 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,11 @@ README_files/
*.DS_Store
*__pycache__
*.h5ad
+changelogs
# IDE ignores
/.idea/
+/.vscode/
# repo specific ignores
output_bash
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 7cb3263351..e662fc6472 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,12 +5,6 @@
"src/common/schemas/task_info.yaml": "src/**/api/task_info.yaml",
"src/common/schemas/task_method.yaml": "src/tasks/**/methods/**/config.vsh.yaml",
"src/common/schemas/task_control_method.yaml": "src/tasks/**/control_methods/**/config.vsh.yaml",
- "src/common/schemas/task_metric.yaml": "src/tasks/**/metrics/**/config.vsh.yaml",
- "/home/rcannood/.viash/releases/0.8.0/schema.json": [
- "*.vsh.yaml"
- ],
- "/home/rcannood/.viash/releases/0.8.6/schema.json": [
- "*.vsh.yaml"
- ]
+ "src/common/schemas/task_metric.yaml": "src/tasks/**/metrics/**/config.vsh.yaml"
}
}
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a57b23cbb0..a141e7571d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,20 +1,9 @@
# Contributing to OpenProblems
-
-- [Code of conduct](#code-of-conduct)
-- [Requirements](#requirements)
-- [Quick start](#quick-start)
-- [Project structure](#project-structure)
-- [Adding a Viash component](#adding-a-viash-component)
-- [Running a component from CLI](#running-a-component-from-cli)
-- [Building a component](#building-a-component)
-- [Unit testing a component](#unit-testing-a-component)
-- [More information](#more-information)
-- [Branch Naming Conventions](#branch-naming-conventions)
-
[OpenProblems](https://openproblems.bio) is a community effort, and
everyone is welcome to contribute. This project is hosted on
-[github.com/openproblems-bio/openproblems-v2](https://github.com/openproblems-bio/openproblems-v2).
+[github.com/openproblems-bio/openproblems](https://github.com/openproblems-bio/openproblems).
+
You can find a full in depth guide on how to contribute to this project
on the [OpenProblems website](https://openproblems.bio/documentation/).
@@ -33,607 +22,3 @@ welcoming, diverse, inclusive, and healthy community.
Our full [Code of Conduct](CODE_OF_CONDUCT.md) is adapted from the
[Contributor Covenant](https://www.contributor-covenant.org), version
2.1.
-
-## Requirements
-
-To use this repository, please install the following dependencies:
-
-- Bash
-- Java (Java 11 or higher)
-- Docker (Instructions [here](https://docs.docker.com/get-docker/))
-- Nextflow (Optional, though [very easy to
- install](https://www.nextflow.io/index.html#GetStarted))
-
-## Quick start
-
-The `src/` folder contains modular software components for running a
-modality alignment benchmark. Running the full pipeline is quite easy.
-
-**Step 0, fetch Viash and Nextflow**
-
-``` bash
-mkdir $HOME/bin
-curl -fsSL get.viash.io | bash -s -- --bin $HOME/bin --tools false
-curl -s https://get.nextflow.io | bash; mv nextflow $HOME/bin
-```
-
-Make sure that Viash and Nextflow are on the \$PATH by checking whether
-the following commands work:
-
-``` bash
-viash -v
-nextflow -v
-```
-
- viash 0.8.0 (c) 2020 Data Intuitive
- nextflow version 23.04.1.5866
-
-**Step 1, download test resources:** by running the following command.
-
-``` bash
-viash run src/common/sync_test_resources/config.vsh.yaml
-```
-
- Completed 256.0 KiB/7.2 MiB (302.6 KiB/s) with 6 file(s) remaining
- Completed 512.0 KiB/7.2 MiB (595.8 KiB/s) with 6 file(s) remaining
- Completed 768.0 KiB/7.2 MiB (880.3 KiB/s) with 6 file(s) remaining
- Completed 1.0 MiB/7.2 MiB (1.1 MiB/s) with 6 file(s) remaining
- Completed 1.2 MiB/7.2 MiB (1.3 MiB/s) with 6 file(s) remaining
- ...
-
-**Step 2, build all the components:** in the `src/` folder as standalone
-executables in the `target/` folder. Use the `-q 'xxx'` parameter to
-build a subset of components in the repository.
-
-``` bash
-viash ns build --query 'label_projection|common' --parallel --setup cachedbuild
-```
-
- In development mode with 'dev'.
- Exporting process_dataset (label_projection) =docker=> target/docker/label_projection/process_dataset
- Exporting accuracy (label_projection/metrics) =docker=> target/docker/label_projection/metrics/accuracy
- Exporting random_labels (label_projection/control_methods) =docker=> target/docker/label_projection/control_methods/random_labels
- [notice] Building container 'label_projection/control_methods_random_labels:dev' with Dockerfile
- [notice] Building container 'common/data_processing_dataset_concatenate:dev' with Dockerfile
- [notice] Building container 'label_projection/metrics_accuracy:dev' with Dockerfile
- ...
-
-Viash will build a whole namespace (`ns`) into executables and Nextflow
-pipelines into the `target/docker` and `target/nextflow` folders
-respectively. By adding the `-q/--query` flag, you can filter which
-components to build using a regex. By adding the `--parallel` flag,
-these components are built in parallel (otherwise it will take a really
-long time). The flag `--setup cachedbuild` will automatically start
-building Docker containers for each of these methods.
-
-The command might take a while to run, since it is building a docker
-container for each of the components.
-
-**Step 3, run the pipeline with nextflow.** To do so, run the bash
-script located at
-`src/tasks/label_projection/workflows/run_nextflow.sh`:
-
-``` bash
-src/tasks/label_projection/workflows/run/run_test.sh
-```
-
- N E X T F L O W ~ version 22.04.5
- Launching `src/tasks/label_projection/workflows/run/main.nf` [pensive_turing] DSL2 - revision: 16b7b0c332
- executor > local (28)
- [f6/f89435] process > run_wf:run_methods:true_labels:true_labels_process (pancreas.true_labels) [100%] 1 of 1 ✔
- [ed/d674a2] process > run_wf:run_methods:majority_vote:majority_vote_process (pancreas.majority_vote) [100%] 1 of 1 ✔
- [15/f0a427] process > run_wf:run_methods:random_labels:random_labels_process (pancreas.random_labels) [100%] 1 of 1 ✔
- [02/969d05] process > run_wf:run_methods:knn:knn_process (pancreas.knn) [100%] 1 of 1 ✔
- [90/5fdf9a] process > run_wf:run_methods:mlp:mlp_process (pancreas.mlp) [100%] 1 of 1 ✔
- [c7/dee2e5] process > run_wf:run_methods:logistic_regression:logistic_regression_process (pancreas.logistic_regression) [100%] 1 of 1 ✔
- [83/3ba0c9] process > run_wf:run_methods:scanvi:scanvi_process (pancreas.scanvi) [100%] 1 of 1 ✔
- [e3/2c298e] process > run_wf:run_methods:seurat_transferdata:seurat_transferdata_process (pancreas.seurat_transferdata) [100%] 1 of 1 ✔
- [d6/7212ab] process > run_wf:run_methods:xgboost:xgboost_process (pancreas.xgboost) [100%] 1 of 1 ✔
- [b6/7dc1a7] process > run_wf:run_metrics:accuracy:accuracy_process (pancreas.scanvi) [100%] 9 of 9 ✔
- [be/7d4da4] process > run_wf:run_metrics:f1:f1_process (pancreas.scanvi) [100%] 9 of 9 ✔
- [89/dcd77a] process > run_wf:aggregate_results:extract_scores:extract_scores_process (combined) [100%] 1 of 1 ✔
-
-## Project structure
-
-High level overview: . ├── bin Helper scripts for building the project
-and developing a new component. ├── resources_test Datasets for testing
-components. If you don’t have this folder, run **Step 1** above. ├── src
-Source files for each component in the pipeline. │ ├── common Common
-processing components. │ ├── datasets Components and pipelines for
-building the ‘Common datasets’ │ ├── label_projection Source files
-related to the ‘Label projection’ task. │ └── … Other tasks. └── target
-Executables generated by viash based on the components listed under
-`src/`. ├── docker Bash executables which can be used from a terminal.
-└── nextflow Nextflow modules which can be used as a standalone pipeline
-or as part of a bigger pipeline.
-
-Detailed overview of a task folder (e.g. `src/tasks/label_projection`):
-
- src/tasks/label_projection/
- ├── api Specs for the components in this task.
- ├── control_methods Control methods which serve as quality control checks for the benchmark.
- ├── docs Task documentation
- ├── methods Label projection method components.
- ├── metrics Label projection metric components.
- ├── resources_scripts The scripts needed to run the benchmark.
- ├── resources_test_scripts The scripts needed to generate the test resources (which are needed for unit testing).
- ├── process_dataset A component that masks a common dataset for use in the benchmark
- └── workflows The benchmarking workflow.
-
-Detailed overview of the `src/datasets` folder:
-
- src/datasets/
- ├── api Specs for the data loaders and normalisation methods.
- ├── loaders Components for ingesting datasets from a source.
- ├── normalization Normalization method components.
- ├── processors Other preprocessing components (e.g. HVG and PCA).
- ├── resource_scripts The scripts needed to generate the common datasets.
- ├── resource_test_scripts The scripts needed to generate the test resources (which are needed for unit testing).
- └── workflows The workflow which generates the common datasets.
-
-## Adding a Viash component
-
-[Viash](https://viash.io) allows you to create pipelines in Bash or
-Nextflow by wrapping Python, R, or Bash scripts into reusable
-components.
-
-You can start creating a new component by [creating a Viash
-component](https://viash.io/guide/component/creation/docker.html).
-
-For example, to create a new Python-based method named `foo`, create a
-Viash config at
-`src/tasks/label_projection/methods/foo/config.vsh.yaml`:
-
-``` yaml
-__merge__: ../../api/comp_method.yaml
-functionality:
- name: "foo"
- namespace: "label_projection/methods"
- # A multiline description of your method.
- description: "Todo: fill in"
- info:
- type: method
-
- # a short label of your method
- label: Foo
-
- # A multiline description of your method.
- description: "Todo: fill in"
-
- # A short summary of the method description.
- summary: "Todo: fill in"
-
- # Add the bibtex reference to the "src/common/library.bib" if it is not already there.
- reference: "cover1967nearest"
-
- repository_url: "https://github.com/openproblems-bio/openproblems-v2"
- documentation_url: "https://openproblems.bio/documentation/"
- preferred_normalization: log_cp10k
- resources:
- - type: python_script
- path: script.py
-platforms:
- - type: docker
- image: ghcr.io/openproblems-bio/base_python:1.0.2
- setup:
- - type: python
- packages: [scikit-learn]
- - type: nextflow
- directives:
- label: [midtime, midmem, lowcpu]
-```
-
-And create a script at
-`src/tasks/label_projection/methods/foo/script.py`:
-
-``` python
-import anndata as ad
-import numpy as np
-
-## VIASH START
-# This code-block will automatically be replaced by Viash at runtime.
-par = {
- 'input_train': 'resources_test/label_projection/pancreas/train.h5ad',
- 'input_test': 'resources_test/label_projection/pancreas/test.h5ad',
- 'output': 'output.h5ad'
-}
-meta = {
- 'functionality_name': 'foo'
-}
-## VIASH END
-
-print("Load data", flush=True)
-input_train = ad.read_h5ad(par['input_train'])
-input_test = ad.read_h5ad(par['input_test'])
-
-print("Create predictions", flush=True)
-input_test.obs["label_pred"] = "foo"
-
-print("Add method name to uns", flush=True)
-input_test.uns["method_id"] = meta["functionality_name"]
-
-print("Write output to file", flush=True)
-input_test.write_h5ad(par["output"], compression="gzip")
-```
-
-## Running a component from CLI
-
-You can view the interface of the executable by running the executable
-with the `-h` or `--help` parameter.
-
-``` bash
-viash run src/tasks/label_projection/methods/foo/config.vsh.yaml -- --help
-```
-
- foo dev
-
- Todo: fill in
-
- Arguments:
- --input_train
- type: file, required parameter, file must exist
- example: resources_test/label_projection/pancreas/train.h5ad
-
- --input_test
- type: file, required parameter, file must exist
- example: resources_test/label_projection/pancreas/test.h5ad
-
- --output
- type: file, required parameter, output, file must exist
- example: resources_test/label_projection/pancreas/prediction.h5ad
-
-Before running a new component, youy will need to create the docker
-container:
-
-``` bash
-viash run src/tasks/label_projection/methods/foo/config.vsh.yaml -- ---setup cachedbuild
-```
-
- [notice] Building container 'ghcr.io/openproblems-bio/label_projection/methods/foo:dev' with Dockerfile
-
-You can **run the component** as follows:
-
-``` bash
-viash run src/tasks/label_projection/methods/foo/config.vsh.yaml -- \
- --input_train resources_test/label_projection/pancreas/train.h5ad \
- --input_test resources_test/label_projection/pancreas/test.h5ad \
- --output resources_test/label_projection/pancreas/prediction.h5ad
-```
-
- Load data
- Create predictions
- Add method name to uns
- Write output to file
-
-## Building a component
-
-`viash` has several helper functions to help you quickly develop a
-component.
-
-With **`viash build`**, you can turn the component into a standalone
-executable. This standalone executable you can give to somebody else,
-and they will be able to run it, provided that they have Bash and Docker
-installed.
-
-``` bash
-viash build src/tasks/label_projection/methods/foo/config.vsh.yaml \
- -o target/docker/label_projection/methods/foo
-```
-
-> [!NOTE]
->
-> The `viash ns build` component does a much better job of setting up a
-> collection of components.
-
-You can now view the same interface of the executable by running the
-executable with the `-h` parameter.
-
-``` bash
-target/docker/label_projection/methods/foo/foo -h
-```
-
- foo dev
-
- Todo: fill in
-
- Arguments:
- --input_train
- type: file, required parameter, file must exist
- example: resources_test/label_projection/pancreas/train.h5ad
-
- --input_test
- type: file, required parameter, file must exist
- example: resources_test/label_projection/pancreas/test.h5ad
-
- --output
- type: file, required parameter, output, file must exist
- example: resources_test/label_projection/pancreas/prediction.h5ad
-
-Or **run the component** as follows:
-
-``` bash
-target/docker/label_projection/methods/foo/foo \
- --input_train resources_test/label_projection/pancreas/train.h5ad \
- --input_test resources_test/label_projection/pancreas/test.h5ad \
- --output resources_test/label_projection/pancreas/prediction.h5ad
-```
-
- Load data
- Create predictions
- Add method name to uns
- Write output to file
-
-## Unit testing a component
-
-The [method API
-specifications](src/tasks/label_projection/api/comp_method.yaml) comes
-with a generic unit test for free. This means you can unit test your
-component using the **`viash test`** command.
-
-``` bash
-viash test src/tasks/label_projection/methods/foo/config.vsh.yaml
-```
-
- Running tests in temporary directory: '/tmp/viash_test_foo11070556749764805852'
- ====================================================================
- +/tmp/viash_test_foo11070556749764805852/build_executable/foo ---verbosity 6 ---setup cachedbuild
- [notice] Building container 'ghcr.io/openproblems-bio/label_projection/methods/foo:test' with Dockerfile
- [info] Running 'docker build -t ghcr.io/openproblems-bio/label_projection/methods/foo:test /tmp/viash_test_foo11070556749764805852/build_executable -f /tmp/viash_test_foo11070556749764805852/build_executable/tmp/dockerbuild-foo-VMKj2u/Dockerfile'
- #0 building with "default" instance using docker driver
-
- #1 [internal] load build definition from Dockerfile
- #1 transferring dockerfile: 658B done
- #1 DONE 0.1s
-
- #2 [internal] load .dockerignore
- #2 transferring context: 2B done
- #2 DONE 0.1s
-
- #3 [internal] load metadata for ghcr.io/openproblems-bio/base_python:1.0.2
- #3 DONE 0.3s
-
- #4 [1/2] FROM ghcr.io/openproblems-bio/base_python:1.0.2@sha256:65a577a3de37665b7a65548cb33c9153b6881742345593d33fe02919c8d66a20
- #4 DONE 0.0s
-
- #5 [2/2] RUN pip install --upgrade pip && pip install --upgrade --no-cache-dir "scikit-learn"
- #5 CACHED
-
- #6 exporting to image
- #6 exporting layers done
- #6 writing image sha256:b5c134ce2ab91a0e616d7362f6bd168e6494c4a1bd7c643d62d7ad65d8678c5b done
- #6 naming to ghcr.io/openproblems-bio/label_projection/methods/foo:test 0.0s done
- #6 DONE 0.0s
- ====================================================================
- +/tmp/viash_test_foo11070556749764805852/test_check_method_config/test_executable
- Load config data
- Check general fields
- Check info fields
- Check platform fields
- All checks succeeded!
- ====================================================================
- +/tmp/viash_test_foo11070556749764805852/test_run_and_check_adata/test_executable
- >> Running test 'run'
- >> Checking whether input files exist
- >> Running script as test
- Load data
- Create predictions
- Add method name to uns
- Write output to file
- >> Checking whether output file exists
- >> Reading h5ad files and checking formats
- Reading and checking input_train
- AnnData object with n_obs × n_vars = 326 × 500
- obs: 'label', 'batch'
- var: 'hvg', 'hvg_score'
- uns: 'dataset_id', 'normalization_id'
- obsm: 'X_pca'
- layers: 'counts', 'normalized'
- Reading and checking input_test
- AnnData object with n_obs × n_vars = 174 × 500
- obs: 'batch'
- var: 'hvg', 'hvg_score'
- uns: 'dataset_id', 'normalization_id'
- obsm: 'X_pca'
- layers: 'counts', 'normalized'
- Reading and checking output
- AnnData object with n_obs × n_vars = 174 × 500
- obs: 'batch', 'label_pred'
- var: 'hvg', 'hvg_score'
- uns: 'dataset_id', 'method_id', 'normalization_id'
- obsm: 'X_pca'
- layers: 'counts', 'normalized'
- All checks succeeded!
- ====================================================================
- SUCCESS! All 2 out of 2 test scripts succeeded!
- Cleaning up temporary directory
-
-Let’s introduce a bug in the script and try running the test again. For
-instance:
-
-``` python
-import anndata as ad
-import numpy as np
-
-## VIASH START
-# This code-block will automatically be replaced by Viash at runtime.
-par = {
- 'input_train': 'resources_test/label_projection/pancreas/train.h5ad',
- 'input_test': 'resources_test/label_projection/pancreas/test.h5ad',
- 'output': 'output.h5ad'
-}
-meta = {
- 'functionality_name': 'foo'
-}
-## VIASH END
-
-print("Load data", flush=True)
-input_train = ad.read_h5ad(par['input_train'])
-input_test = ad.read_h5ad(par['input_test'])
-
-print("Not creating any predictions!!!", flush=True)
-# input_test.obs["label_pred"] = "foo"
-
-print("Not adding method name to uns!!!", flush=True)
-# input_test.uns["method_id"] = meta["functionality_name"]
-
-print("Write output to file", flush=True)
-input_test.write_h5ad(par["output"], compression="gzip")
-```
-
-If we now run the test, we should get an error since we didn’t create
-all of the required output slots.
-
-``` bash
-viash test src/tasks/label_projection/methods/foo/config.vsh.yaml
-```
-
- Running tests in temporary directory: '/tmp/viash_test_foo11839237358029204600'
- ====================================================================
- +/tmp/viash_test_foo11839237358029204600/build_executable/foo ---verbosity 6 ---setup cachedbuild
- [notice] Building container 'ghcr.io/openproblems-bio/label_projection/methods/foo:test' with Dockerfile
- [info] Running 'docker build -t ghcr.io/openproblems-bio/label_projection/methods/foo:test /tmp/viash_test_foo11839237358029204600/build_executable -f /tmp/viash_test_foo11839237358029204600/build_executable/tmp/dockerbuild-foo-gPvc8b/Dockerfile'
- #0 building with "default" instance using docker driver
-
- #1 [internal] load build definition from Dockerfile
- #1 transferring dockerfile: 658B done
- #1 DONE 0.1s
-
- #2 [internal] load .dockerignore
- #2 transferring context: 2B done
- #2 DONE 0.1s
-
- #3 [internal] load metadata for ghcr.io/openproblems-bio/base_python:1.0.2
- #3 DONE 0.3s
-
- #4 [1/2] FROM ghcr.io/openproblems-bio/base_python:1.0.2@sha256:65a577a3de37665b7a65548cb33c9153b6881742345593d33fe02919c8d66a20
- #4 DONE 0.0s
-
- #5 [2/2] RUN pip install --upgrade pip && pip install --upgrade --no-cache-dir "scikit-learn"
- #5 CACHED
-
- #6 exporting to image
- #6 exporting layers done
- #6 writing image sha256:939f5846475192d821898f663f15872432e7a2c9033b38ac9b9522155270daf4 done
- #6 naming to ghcr.io/openproblems-bio/label_projection/methods/foo:test 0.0s done
- #6 DONE 0.0s
- ====================================================================
- +/tmp/viash_test_foo11839237358029204600/test_check_method_config/test_executable
- Load config data
- Check general fields
- Check info fields
- Check platform fields
- All checks succeeded!
- ====================================================================
- +/tmp/viash_test_foo11839237358029204600/test_run_and_check_adata/test_executable
- >> Running test 'run'
- >> Checking whether input files exist
- >> Running script as test
- Load data
- Not creating any predictions!!!
- Not adding method name to uns!!!
- Write output to file
- >> Checking whether output file exists
- >> Reading h5ad files and checking formats
- Reading and checking input_train
- AnnData object with n_obs × n_vars = 326 × 500
- obs: 'label', 'batch'
- var: 'hvg', 'hvg_score'
- uns: 'dataset_id', 'normalization_id'
- obsm: 'X_pca'
- layers: 'counts', 'normalized'
- Reading and checking input_test
- AnnData object with n_obs × n_vars = 174 × 500
- obs: 'batch'
- var: 'hvg', 'hvg_score'
- uns: 'dataset_id', 'normalization_id'
- obsm: 'X_pca'
- layers: 'counts', 'normalized'
- Reading and checking output
- AnnData object with n_obs × n_vars = 174 × 500
- Traceback (most recent call last):
- obs: 'batch'
- var: 'hvg', 'hvg_score'
-      File "/viash_automount/tmp/viash_test_foo11839237358029204600/test_run_and_check_adata/tmp//viash-run-foo-22aQh6.py", line 138, in <module>
- uns: 'dataset_id', 'normalization_id'
- run_and_check(argset_args, cmd)
- obsm: 'X_pca'
- File "/viash_automount/tmp/viash_test_foo11839237358029204600/test_run_and_check_adata/tmp//viash-run-foo-22aQh6.py", line 81, in run_and_check
- layers: 'counts', 'normalized'
- check_slots(adata, arg)
- File "/viash_automount/tmp/viash_test_foo11839237358029204600/test_run_and_check_adata/tmp//viash-run-foo-22aQh6.py", line 48, in check_slots
- assert slot_item["name"] in struc_x,\
- AssertionError: File 'output.h5ad' is missing slot .obs['label_pred']
- ====================================================================
- ERROR! Only 1 out of 2 test scripts succeeded!
- Unexpected error occurred! If you think this is a bug, please post
- create an issue at https://github.com/viash-io/viash/issues containing
- a reproducible example and the stack trace below.
-
- viash - 0.8.0
- Stacktrace:
- java.lang.RuntimeException: Only 1 out of 2 test scripts succeeded!
- at io.viash.ViashTest$.apply(ViashTest.scala:134)
- at io.viash.Main$.mainCLI(Main.scala:253)
- at io.viash.Main$.mainCLIOrVersioned(Main.scala:123)
- at io.viash.Main$.main(Main.scala:58)
- at io.viash.Main.main(Main.scala)
-
-## More information
-
-The [Viash reference docs](https://viash.io/reference/config/) page
-provides information on all of the available fields in a Viash config,
-and the [Guide](https://viash.io/guide/) will help you get started with
-creating components from scratch.
-
-
-
-## Branch Naming Conventions
-
-### Category
-
-A git branch should start with a category. Pick one of these: feature,
-bugfix, hotfix, or test.
-
-- `feature` is for adding, refactoring or removing a feature
-- `bugfix` is for fixing a bug
-- `hotfix` is for changing code with a temporary solution and/or without
- following the usual process (usually because of an emergency)
-- `test` is for experimenting outside of an issue/ticket
-- `doc` is for adding, changing or removing documentation
-
-### Reference
-
-After the category, there should be a “`/`” followed by the reference of
-the issue/ticket/task you are working on. If there’s no reference, just
-add no-ref. With task it is meant as benchmarking task
-e.g. batch_integration
-
-### Description
-
-After the reference, there should be another “`/`” followed by a
-description which sums up the purpose of this specific branch. This
-description should be short and “kebab-cased”.
-
-By default, you can use the title of the issue/ticket you are working
-on. Just replace any special character by “`-`”.
-
-### To sum up, follow this pattern when branching:
-
-``` bash
-git branch <category/reference/description>
-```
-
-### Examples
-
-- You need to add, refactor or remove a feature:
- `git branch feature/issue-42/create-new-button-component`
-- You need to fix a bug:
- `git branch bugfix/issue-342/button-overlap-form-on-mobile`
-- You need to fix a bug really fast (possibly with a temporary
- solution): `git branch hotfix/no-ref/registration-form-not-working`
-- You need to experiment outside of an issue/ticket:
- `git branch test/no-ref/refactor-components-with-atomic-design`
-
-### References
-
-- [a-simplified-convention-for-naming-branches-and-commits-in-git](https://dev.to/varbsan/a-simplified-convention-for-naming-branches-and-commits-in-git-il4)
diff --git a/CONTRIBUTING.qmd b/CONTRIBUTING.qmd
deleted file mode 100644
index 6b6e33ae07..0000000000
--- a/CONTRIBUTING.qmd
+++ /dev/null
@@ -1,401 +0,0 @@
----
-title: Contributing to OpenProblems
-format: gfm
-toc: true
-toc-depth: 2
-engine: knitr
----
-
-[OpenProblems](https://openproblems.bio) is a community effort, and everyone is welcome to contribute. This project is hosted on [github.com/openproblems-bio/openproblems-v2](https://github.com/openproblems-bio/openproblems-v2). You can find a full in depth guide on how to contribute to this project on the [OpenProblems website](https://openproblems.bio/documentation/).
-
-## Code of conduct {#code-of-conduct}
-
-We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.
-
-We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
-
-Our full [Code of Conduct](CODE_OF_CONDUCT.md) is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1.
-
-
-## Requirements
-
-To use this repository, please install the following dependencies:
-
-* Bash
-* Java (Java 11 or higher)
-* Docker (Instructions [here](https://docs.docker.com/get-docker/))
-* Nextflow (Optional, though [very easy to install](https://www.nextflow.io/index.html#GetStarted))
-
-## Quick start
-
-The `src/` folder contains modular software components for running a modality alignment benchmark. Running the full pipeline is quite easy.
-
-**Step 0, fetch Viash and Nextflow**
-
-```bash
-mkdir $HOME/bin
-curl -fsSL get.viash.io | bash -s -- --bin $HOME/bin --tools false
-curl -s https://get.nextflow.io | bash; mv nextflow $HOME/bin
-```
-
-Make sure that Viash and Nextflow are on the $PATH by checking whether the following commands work:
-
-```{bash}
-viash -v
-nextflow -v
-```
-
-**Step 1, download test resources:** by running the following command.
-
-```bash
-viash run src/common/sync_test_resources/config.vsh.yaml
-```
-
- Completed 256.0 KiB/7.2 MiB (302.6 KiB/s) with 6 file(s) remaining
- Completed 512.0 KiB/7.2 MiB (595.8 KiB/s) with 6 file(s) remaining
- Completed 768.0 KiB/7.2 MiB (880.3 KiB/s) with 6 file(s) remaining
- Completed 1.0 MiB/7.2 MiB (1.1 MiB/s) with 6 file(s) remaining
- Completed 1.2 MiB/7.2 MiB (1.3 MiB/s) with 6 file(s) remaining
- ...
-
-**Step 2, build all the components:** in the `src/` folder as standalone executables in the `target/` folder. Use the `-q 'xxx'` parameter to build a subset of components in the repository.
-
-```bash
-viash ns build --query 'label_projection|common' --parallel --setup cachedbuild
-```
-
- In development mode with 'dev'.
- Exporting process_dataset (label_projection) =docker=> target/docker/label_projection/process_dataset
- Exporting accuracy (label_projection/metrics) =docker=> target/docker/label_projection/metrics/accuracy
- Exporting random_labels (label_projection/control_methods) =docker=> target/docker/label_projection/control_methods/random_labels
- [notice] Building container 'label_projection/control_methods_random_labels:dev' with Dockerfile
- [notice] Building container 'common/data_processing_dataset_concatenate:dev' with Dockerfile
- [notice] Building container 'label_projection/metrics_accuracy:dev' with Dockerfile
- ...
-
-Viash will build a whole namespace (`ns`) into executables and Nextflow pipelines into the `target/docker` and `target/nextflow` folders respectively.
-By adding the `-q/--query` flag, you can filter which components to build using a regex.
-By adding the `--parallel` flag, these components are built in parallel (otherwise it will take a really long time).
-The flag `--setup cachedbuild` will automatically start building Docker containers for each of these methods.
-
-The command might take a while to run, since it is building a docker container for each of the components.
-
-**Step 3, run the pipeline with nextflow.** To do so, run the bash script located at `src/tasks/label_projection/workflows/run_nextflow.sh`:
-
-```bash
-src/tasks/label_projection/workflows/run/run_test.sh
-```
-
- N E X T F L O W ~ version 22.04.5
- Launching `src/tasks/label_projection/workflows/run/main.nf` [pensive_turing] DSL2 - revision: 16b7b0c332
- executor > local (28)
- [f6/f89435] process > run_wf:run_methods:true_labels:true_labels_process (pancreas.true_labels) [100%] 1 of 1 ✔
- [ed/d674a2] process > run_wf:run_methods:majority_vote:majority_vote_process (pancreas.majority_vote) [100%] 1 of 1 ✔
- [15/f0a427] process > run_wf:run_methods:random_labels:random_labels_process (pancreas.random_labels) [100%] 1 of 1 ✔
- [02/969d05] process > run_wf:run_methods:knn:knn_process (pancreas.knn) [100%] 1 of 1 ✔
- [90/5fdf9a] process > run_wf:run_methods:mlp:mlp_process (pancreas.mlp) [100%] 1 of 1 ✔
- [c7/dee2e5] process > run_wf:run_methods:logistic_regression:logistic_regression_process (pancreas.logistic_regression) [100%] 1 of 1 ✔
- [83/3ba0c9] process > run_wf:run_methods:scanvi:scanvi_process (pancreas.scanvi) [100%] 1 of 1 ✔
- [e3/2c298e] process > run_wf:run_methods:seurat_transferdata:seurat_transferdata_process (pancreas.seurat_transferdata) [100%] 1 of 1 ✔
- [d6/7212ab] process > run_wf:run_methods:xgboost:xgboost_process (pancreas.xgboost) [100%] 1 of 1 ✔
- [b6/7dc1a7] process > run_wf:run_metrics:accuracy:accuracy_process (pancreas.scanvi) [100%] 9 of 9 ✔
- [be/7d4da4] process > run_wf:run_metrics:f1:f1_process (pancreas.scanvi) [100%] 9 of 9 ✔
- [89/dcd77a] process > run_wf:aggregate_results:extract_scores:extract_scores_process (combined) [100%] 1 of 1 ✔
-
-## Project structure
-
-High level overview:
- .
- ├── bin Helper scripts for building the project and developing a new component.
- ├── resources_test Datasets for testing components. If you don't have this folder, run **Step 1** above.
- ├── src Source files for each component in the pipeline.
- │ ├── common Common processing components.
- │ ├── datasets Components and pipelines for building the 'Common datasets'
- │ ├── label_projection Source files related to the 'Label projection' task.
- │ └── ... Other tasks.
- └── target Executables generated by viash based on the components listed under `src/`.
- ├── docker Bash executables which can be used from a terminal.
- └── nextflow Nextflow modules which can be used as a standalone pipeline or as part of a bigger pipeline.
-
-Detailed overview of a task folder (e.g. `src/tasks/label_projection`):
-
- src/tasks/label_projection/
- ├── api Specs for the components in this task.
- ├── control_methods Control methods which serve as quality control checks for the benchmark.
- ├── docs Task documentation
- ├── methods Label projection method components.
- ├── metrics Label projection metric components.
- ├── resources_scripts The scripts needed to run the benchmark.
- ├── resources_test_scripts The scripts needed to generate the test resources (which are needed for unit testing).
- ├── process_dataset A component that masks a common dataset for use in the benchmark
- └── workflows The benchmarking workflow.
-
-
-Detailed overview of the `src/datasets` folder:
-
- src/datasets/
- ├── api Specs for the data loaders and normalisation methods.
- ├── loaders Components for ingesting datasets from a source.
- ├── normalization Normalization method components.
- ├── processors Other preprocessing components (e.g. HVG and PCA).
- ├── resource_scripts The scripts needed to generate the common datasets.
- ├── resource_test_scripts The scripts needed to generate the test resources (which are needed for unit testing).
- └── workflows The workflow which generates the common datasets.
-
-## Adding a Viash component
-
-[Viash](https://viash.io) allows you to create pipelines
-in Bash or Nextflow by wrapping Python, R, or Bash scripts into reusable components.
-
-
-You can start creating a new component by [creating a Viash component](https://viash.io/guide/component/creation/docker.html).
-
-
-```{bash, include=FALSE}
-
-mkdir -p src/tasks/label_projection/methods/foo
-
-cat > src/tasks/label_projection/methods/foo/config.vsh.yaml << HERE
-__merge__: ../../api/comp_method.yaml
-functionality:
- name: "foo"
- namespace: "label_projection/methods"
- # A multiline description of your method.
- description: "Todo: fill in"
- info:
- type: method
-
- # a short label of your method
- label: Foo
-
- # A multiline description of your method.
- description: "Todo: fill in"
-
- # A short summary of the method description.
- summary: "Todo: fill in"
-
- # Add the bibtex reference to the "src/common/library.bib" if it is not already there.
- reference: "cover1967nearest"
-
- repository_url: "https://github.com/openproblems-bio/openproblems-v2"
- documentation_url: "https://openproblems.bio/documentation/"
- preferred_normalization: log_cp10k
- resources:
- - type: python_script
- path: script.py
-platforms:
- - type: docker
- image: openproblems/base_python:1.0.0
- setup:
- - type: python
- packages: [scikit-learn]
- - type: nextflow
- directives:
- label: [midtime, midmem, lowcpu]
-HERE
-
-cat > src/tasks/label_projection/methods/foo/script.py << HERE
-import anndata as ad
-import numpy as np
-
-## VIASH START
-# This code-block will automatically be replaced by Viash at runtime.
-par = {
- 'input_train': 'resources_test/label_projection/pancreas/train.h5ad',
- 'input_test': 'resources_test/label_projection/pancreas/test.h5ad',
- 'output': 'output.h5ad'
-}
-meta = {
- 'functionality_name': 'foo'
-}
-## VIASH END
-
-print("Load data", flush=True)
-input_train = ad.read_h5ad(par['input_train'])
-input_test = ad.read_h5ad(par['input_test'])
-
-print("Create predictions", flush=True)
-input_test.obs["label_pred"] = "foo"
-
-print("Add method name to uns", flush=True)
-input_test.uns["method_id"] = meta["functionality_name"]
-
-print("Write output to file", flush=True)
-input_test.write_h5ad(par["output"], compression="gzip")
-HERE
-```
-
-For example, to create a new Python-based method named `foo`, create a Viash config at `src/tasks/label_projection/methods/foo/config.vsh.yaml`:
-
-```{embed lang="yaml"}
-src/tasks/label_projection/methods/foo/config.vsh.yaml
-```
-
-And create a script at `src/tasks/label_projection/methods/foo/script.py`:
-
-```{embed lang="python"}
-src/tasks/label_projection/methods/foo/script.py
-```
-
-
-## Running a component from CLI
-
-You can view the interface of the executable by running the executable with the `-h` or `--help` parameter.
-
-```{bash}
-viash run src/tasks/label_projection/methods/foo/config.vsh.yaml -- --help
-```
-
-Before running a new component, youy will need to create the docker container:
-
-```{bash}
-viash run src/tasks/label_projection/methods/foo/config.vsh.yaml -- ---setup cachedbuild
-
-```
-
-You can **run the component** as follows:
-
-```{bash}
-viash run src/tasks/label_projection/methods/foo/config.vsh.yaml -- \
- --input_train resources_test/label_projection/pancreas/train.h5ad \
- --input_test resources_test/label_projection/pancreas/test.h5ad \
- --output resources_test/label_projection/pancreas/prediction.h5ad
-```
-
-## Building a component
-
-`viash` has several helper functions to help you quickly develop a component.
-
-With **`viash build`**, you can turn the component into a standalone executable.
-This standalone executable you can give to somebody else, and they will be able to
-run it, provided that they have Bash and Docker installed.
-
-```{bash}
-viash build src/tasks/label_projection/methods/foo/config.vsh.yaml \
- -o target/docker/label_projection/methods/foo
-```
-
-:::{.callout-note}
-The `viash ns build` component does a much better job of setting up
-a collection of components.
-:::
-
-You can now view the same interface of the executable by running the executable with the `-h` parameter.
-
-```{bash}
-target/docker/label_projection/methods/foo/foo -h
-```
-
-Or **run the component** as follows:
-
-```{bash}
-target/docker/label_projection/methods/foo/foo \
- --input_train resources_test/label_projection/pancreas/train.h5ad \
- --input_test resources_test/label_projection/pancreas/test.h5ad \
- --output resources_test/label_projection/pancreas/prediction.h5ad
-```
-
-
-## Unit testing a component
-
-The [method API specifications](src/tasks/label_projection/api/comp_method.yaml) comes with a generic unit test for free.
-This means you can unit test your component using the **`viash test`** command.
-
-```{bash}
-viash test src/tasks/label_projection/methods/foo/config.vsh.yaml
-```
-
-```{bash include=FALSE}
-cat > src/tasks/label_projection/methods/foo/script.py << HERE
-import anndata as ad
-import numpy as np
-
-## VIASH START
-# This code-block will automatically be replaced by Viash at runtime.
-par = {
- 'input_train': 'resources_test/label_projection/pancreas/train.h5ad',
- 'input_test': 'resources_test/label_projection/pancreas/test.h5ad',
- 'output': 'output.h5ad'
-}
-meta = {
- 'functionality_name': 'foo'
-}
-## VIASH END
-
-print("Load data", flush=True)
-input_train = ad.read_h5ad(par['input_train'])
-input_test = ad.read_h5ad(par['input_test'])
-
-print("Not creating any predictions!!!", flush=True)
-# input_test.obs["label_pred"] = "foo"
-
-print("Not adding method name to uns!!!", flush=True)
-# input_test.uns["method_id"] = meta["functionality_name"]
-
-print("Write output to file", flush=True)
-input_test.write_h5ad(par["output"], compression="gzip")
-HERE
-```
-
-Let's introduce a bug in the script and try running the test again. For instance:
-
-```{embed lang="python"}
-src/tasks/label_projection/methods/foo/script.py
-```
-
-If we now run the test, we should get an error since we didn't create all of the required output slots.
-
-```{bash error=TRUE}
-viash test src/tasks/label_projection/methods/foo/config.vsh.yaml
-```
-
-
-## More information
-
-The [Viash reference docs](https://viash.io/reference/config/) page provides information on all of the available fields in a Viash config, and the [Guide](https://viash.io/guide/) will help you get started with creating components from scratch.
-
-
-
-
-```{bash, echo=FALSE}
-rm -r src/tasks/label_projection/methods/foo target/docker/label_projection/methods/foo
-```
-
-## Branch Naming Conventions
-
-### Category
-
-A git branch should start with a category. Pick one of these: feature, bugfix, hotfix, or test.
-
-* `feature` is for adding, refactoring or removing a feature
-* `bugfix` is for fixing a bug
-* `hotfix` is for changing code with a temporary solution and/or without following the usual process (usually because of an emergency)
-* `test` is for experimenting outside of an issue/ticket
-* `doc` is for adding, changing or removing documentation
-
-### Reference
-
-After the category, there should be a "`/`" followed by the reference of the issue/ticket/task you are working on. If there's no reference, just add no-ref. With task it is meant as benchmarking task e.g. batch_integration
-
-### Description
-
-After the reference, there should be another "`/`" followed by a description which sums up the purpose of this specific branch. This description should be short and "kebab-cased".
-
-By default, you can use the title of the issue/ticket you are working on. Just replace any special character by "`-`".
-
-### To sum up, follow this pattern when branching:
-
-```bash
-git branch
-```
-
-### Examples
-
-* You need to add, refactor or remove a feature: `git branch feature/issue-42/create-new-button-component`
-* You need to fix a bug: `git branch bugfix/issue-342/button-overlap-form-on-mobile`
-* You need to fix a bug really fast (possibly with a temporary solution): `git branch hotfix/no-ref/registration-form-not-working`
-* You need to experiment outside of an issue/ticket: `git branch test/no-ref/refactor-components-with-atomic-design`
-
-### References
-
-* [a-simplified-convention-for-naming-branches-and-commits-in-git](https://dev.to/varbsan/a-simplified-convention-for-naming-branches-and-commits-in-git-il4)
\ No newline at end of file
diff --git a/src/common/create_component/config.vsh.yaml b/src/common/create_component/config.vsh.yaml
index 5c829462ad..b8dc748fb6 100644
--- a/src/common/create_component/config.vsh.yaml
+++ b/src/common/create_component/config.vsh.yaml
@@ -56,9 +56,9 @@ functionality:
- type: python_script
path: test.py
- path: /src
- dest: openproblems-v2/src
+ dest: openproblems/src
- path: /_viash.yaml
- dest: openproblems-v2/_viash.yaml
+ dest: openproblems/_viash.yaml
platforms:
- type: docker
image: python:3.10-slim
diff --git a/src/common/create_component/test.py b/src/common/create_component/test.py
index 16da1bd854..a53e54a18e 100644
--- a/src/common/create_component/test.py
+++ b/src/common/create_component/test.py
@@ -9,7 +9,7 @@
}
## VIASH END
-opv2 = f"{meta['resources_dir']}/openproblems-v2"
+opv2 = f"{meta['resources_dir']}/openproblems"
output_path = f"{opv2}/src/tasks/label_projection/methods/test_method"
cmd = [
diff --git a/src/common/create_task_readme/config.vsh.yaml b/src/common/create_task_readme/config.vsh.yaml
index d268974ce8..cff0917b0d 100644
--- a/src/common/create_task_readme/config.vsh.yaml
+++ b/src/common/create_task_readme/config.vsh.yaml
@@ -25,7 +25,7 @@ functionality:
name: --github_url
description: |
URL to the GitHub repository. Needed for linking to the source code.
- default: "https://github.com/openproblems-bio/openproblems-v2/tree/main/"
+ default: "https://github.com/openproblems-bio/openproblems/tree/main/"
- name: Outputs
arguments:
- type: file
@@ -43,9 +43,9 @@ functionality:
- type: r_script
path: test.R
- path: /src
- dest: openproblems-v2/src
+ dest: openproblems/src
- path: /_viash.yaml
- dest: openproblems-v2/_viash.yaml
+ dest: openproblems/_viash.yaml
platforms:
- type: docker
image: openproblems/base_r:1.0.0
diff --git a/src/common/create_task_readme/script.R b/src/common/create_task_readme/script.R
index 55388ea7ed..35320e4d97 100644
--- a/src/common/create_task_readme/script.R
+++ b/src/common/create_task_readme/script.R
@@ -8,7 +8,7 @@ par <- list(
"task_dir" = "src/tasks/batch_integration",
"output" = "src/tasks/batch_integration/README.md",
"viash_yaml" = "_viash.yaml",
- "github_url" = "https://github.com/openproblems-bio/openproblems-v2/tree/main/"
+ "github_url" = "https://github.com/openproblems-bio/openproblems/tree/main/"
)
meta <- list(
"resources_dir" = "src/common/helper_functions",
diff --git a/src/common/create_task_readme/test.R b/src/common/create_task_readme/test.R
index 9af1fe9738..3a981fe7ca 100644
--- a/src/common/create_task_readme/test.R
+++ b/src/common/create_task_readme/test.R
@@ -3,7 +3,7 @@ requireNamespace("assertthat", quietly = TRUE)
## VIASH START
## VIASH END
-opv2 <- paste0(meta$resources_dir, "/openproblems-v2")
+opv2 <- paste0(meta$resources_dir, "/openproblems")
output_path <- "output.md"
cat(">> Running the script as test\n")
diff --git a/src/common/helper_functions/read_api_files.R b/src/common/helper_functions/read_api_files.R
index be602b58c4..f2cf49b2f8 100644
--- a/src/common/helper_functions/read_api_files.R
+++ b/src/common/helper_functions/read_api_files.R
@@ -232,7 +232,7 @@ render_component <- function(spec) {
strip_margin(glue::glue("
§## Component type: {spec$info$label}
§
- §Path: [`src/{spec$info$namespace}`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/{spec$info$namespace})
+ §Path: [`src/{spec$info$namespace}`](https://github.com/openproblems-bio/openproblems/tree/main/src/{spec$info$namespace})
§
§{spec$info$summary}
§
diff --git a/src/common/ontology/check_obsolete_terms/config.vsh.yaml b/src/common/ontology/check_obsolete_terms/config.vsh.yaml
deleted file mode 100644
index fc006f6cf9..0000000000
--- a/src/common/ontology/check_obsolete_terms/config.vsh.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-functionality:
- status: disabled
- name: check_obsolete_terms
- namespace: common/ontology
- description: |
- Check for obsolete ontology terms in the dataset.
- argument_groups:
- - name: Inputs
- arguments:
- - name: "--input"
- type: file
- description: "Input h5ad file."
- required: true
- direction: input
- example: dataset.h5ad
- - name: "--struct"
- type: string
- description: "In which struct to look for the term."
- required: true
- direction: input
- example: "obs"
- - name: "--input_term"
- type: string
- description: "In which field to look for the term."
- required: true
- direction: input
- example: "cell_type_ontology_term_id"
- - name: Ontology
- arguments:
- - name: "--ontology"
- type: file
- description: "Ontology to check."
- required: true
- direction: input
- example: cl.obo
- - name: Arguments
- arguments:
- - name: "--obsolete_as_na"
- type: boolean
- description: "Whether to replace obsolete terms with NA."
- default: true
- - name: Outputs
- arguments:
- - name: "--output"
- type: file
- description: Output h5ad file.
- direction: output
- example: output.h5ad
- - name: "--output_term"
- type: string
- description: "In which field to store the updated term."
- required: true
- example: "cell_type_ontology_term_id"
- - name: "--output_name"
- type: string
- description: "In which field to store the updated term name."
- required: true
- example: "cell_type"
- - name: "--output_obsolete"
- type: string
- description: "In which field to store whether a term is obsolete."
- required: true
- example: "cell_type_ontology_obsolete"
- resources:
- - type: r_script
- path: script.R
- test_resources:
- - type: r_script
- path: test.R
- - path: /resources_test/common/cellxgene_census
-platforms:
- - type: docker
- image: openproblems/base_r:1.0.0
- setup:
- - type: r
- packages: [ dplyr, tidyr, tibble, ontologyIndex, processx ]
\ No newline at end of file
diff --git a/src/common/ontology/check_obsolete_terms/script.R b/src/common/ontology/check_obsolete_terms/script.R
deleted file mode 100644
index bc1ef0ccb7..0000000000
--- a/src/common/ontology/check_obsolete_terms/script.R
+++ /dev/null
@@ -1,63 +0,0 @@
-library(dplyr, warn.conflicts = FALSE)
-library(tidyr, warn.conflicts = FALSE)
-library(tibble, warn.conflicts = FALSE)
-library(ontologyIndex, warn.conflicts = FALSE)
-
-## VIASH START
-par <- list(
- input = "resources_test/common/cellxgene_census/dataset.h5ad",
- ontology = "resources_test/common/cellxgene_census/cl.obo",
- input_term = "cell_type_ontology_term_id",
- struct = "obs",
- output = "output.h5ad",
- output_term = "cell_type_ontology_term_id",
- output_name = "cell_type",
- output_obsolete = "cell_type_ontology_obsolete",
- obsolete_as_na = TRUE
-)
-## VIASH END
-
-cat("Read ontology\n")
-ont <- ontologyIndex::get_ontology(
- par$ontology,
- extract_tags = "everything"
-)
-ont_tib <- ont %>%
- as.data.frame %>%
- select(id, name, obsolete, replaced_by) %>%
- as_tibble
-
-cat("Read anndata\n")
-adata <- anndata::read_h5ad(par$input, backed = "r")
-
-cat("Find terms\n")
-term_ids <- adata[[par$struct]][[par$input_term]]
-
-unique_term_ids <- as.character(unique(term_ids))
-
-cat("Look for obsolete or replaced terms\n")
-ont_map <- ont_tib %>%
- slice(match(unique_term_ids, id)) %>%
- transmute(
- orig_id = id,
- id = case_when(
- !obsolete ~ id,
- replaced_by != "" ~ replaced_by,
- rep(par$obsolete_as_na, length(id)) ~ rep(NA_character_, length(id)),
- TRUE ~ id
- )
- ) %>%
- left_join(ont_tib %>% select(id, name, obsolete), by = "id")
-
-cat("Store new columns in data structure\n")
-new_data <- ont_map %>% slice(match(term_ids, orig_id))
-adata[[par$struct]][[par$output_term]] <- new_data$id
-adata[[par$struct]][[par$output_name]] <- new_data$name
-adata[[par$struct]][[par$output_obsolete]] <- ifelse(
- !is.na(new_data$obsolete),
- new_data$obsolete,
- TRUE
-)
-
-cat("Write to file\n")
-anndata::write_h5ad(adata, par$output)
diff --git a/src/common/ontology/check_obsolete_terms/test.R b/src/common/ontology/check_obsolete_terms/test.R
deleted file mode 100644
index 5e3c582021..0000000000
--- a/src/common/ontology/check_obsolete_terms/test.R
+++ /dev/null
@@ -1,54 +0,0 @@
-library(assertthat)
-
-## VIASH START
-meta <- list(
- executable = "target/docker/common/ontology/check_obsolete_terms",
- resources_dir = "resources_test/common/"
-)
-## VIASH END
-
-input_file <- paste0(meta$resources_dir, "/cellxgene_census/dataset.h5ad")
-ontology_file <- paste0(meta$resources_dir, "/cellxgene_census/cl.obo")
-temp_file <- tempfile(fileext = ".h5ad")
-temp2_file <- tempfile(fileext = ".h5ad")
-
-# add obsolete terms to the dataset
-input <- anndata::read_h5ad(input_file)
-input$obs$cell_type_ontology_term_id <- as.character(input$obs$cell_type_ontology_term_id)
-input$obs$cell_type_ontology_term_id[1:3] <- "CL:0000375" # obsolete, replaced by 'CL:0007010'
-input$obs$cell_type_ontology_term_id[4:6] <- "CL:0000399" # obsolete, removed
-input$obs$cell_type_ontology_term_id[7:9] <- "CL:0007011" # not obsolete
-zzz <- input$write_h5ad(temp_file)
-
-# run component
-zzz <- processx::run(
- meta$executable,
- c(
- "--input", temp_file,
- "--struct", "obs",
- "--input_term", "cell_type_ontology_term_id",
- "--ontology", ontology_file,
- "--output", temp2_file,
- "--output_term", "cell_type_ontology_term_id_new",
- "--output_name", "cell_type_new",
- "--output_obsolete", "cell_type_obsolete_new"
- ),
- echo = TRUE
-)
-
-# check output
-output <- anndata::read_h5ad(temp2_file)
-
-print(output$obs[1:10, , drop = FALSE])
-
-assert_that(
- all(output$obs$cell_type_ontology_term_id_new[1:3] == "CL:0007010"),
- all(is.na(output$obs$cell_type_ontology_term_id_new[4:6])),
- all(output$obs$cell_type_ontology_term_id_new[7:9] == "CL:0007011"),
- all(output$obs$cell_type_new[1:3] == "preosteoblast"),
- all(is.na(output$obs$cell_type_new[4:6])),
- all(output$obs$cell_type_new[7:9] == "enteric neuron"),
- all(!output$obs$cell_type_obsolete_new[1:3]),
- all(output$obs$cell_type_obsolete_new[4:6]),
- all(!output$obs$cell_type_obsolete_new[7:9])
-)
diff --git a/src/common/process_task_results/api/get_info.yaml b/src/common/process_task_results/api/get_info.yaml
index 117504cc75..9691936615 100644
--- a/src/common/process_task_results/api/get_info.yaml
+++ b/src/common/process_task_results/api/get_info.yaml
@@ -18,6 +18,6 @@ functionality:
- type: python_script
path: /src/common/comp_tests/check_get_info.py
- path: /src
- dest: openproblems-v2/src
+ dest: openproblems/src
- path: /_viash.yaml
- dest: openproblems-v2/_viash.yaml
\ No newline at end of file
+ dest: openproblems/_viash.yaml
\ No newline at end of file
diff --git a/src/common/process_task_results/get_task_info/script.R b/src/common/process_task_results/get_task_info/script.R
index cfe529edfc..71f1cb777a 100644
--- a/src/common/process_task_results/get_task_info/script.R
+++ b/src/common/process_task_results/get_task_info/script.R
@@ -20,7 +20,7 @@ out <- list(
task_name = info$label,
task_summary = info$summary,
task_description = paste0(info$motivation, "\n\n", info$description),
- repo = "openproblems-bio/openproblems-v2",
+ repo = "openproblems-bio/openproblems",
authors = info$authors
)
diff --git a/src/common/process_task_results/run/run_nf_tower_test.sh b/src/common/process_task_results/run/run_nf_tower_test.sh
index 95fa080f12..ca74e357a1 100644
--- a/src/common/process_task_results/run/run_nf_tower_test.sh
+++ b/src/common/process_task_results/run/run_nf_tower_test.sh
@@ -28,7 +28,7 @@ process {
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/common/workflows/transform_meta/main.nf \
diff --git a/src/datasets/resource_scripts/cellxgene_census.sh b/src/datasets/resource_scripts/cellxgene_census.sh
index f0d93c9210..5d6181f91e 100755
--- a/src/datasets/resource_scripts/cellxgene_census.sh
+++ b/src/datasets/resource_scripts/cellxgene_census.sh
@@ -142,7 +142,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_cellxgene_census/main.nf \
diff --git a/src/datasets/resource_scripts/dataset_info.sh b/src/datasets/resource_scripts/dataset_info.sh
index 6ec2de9963..04c032916f 100755
--- a/src/datasets/resource_scripts/dataset_info.sh
+++ b/src/datasets/resource_scripts/dataset_info.sh
@@ -31,7 +31,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--entry-name auto \
--pull-latest \
diff --git a/src/datasets/resource_scripts/openproblems_neurips2021_multimodal.sh b/src/datasets/resource_scripts/openproblems_neurips2021_multimodal.sh
index 8fd7e3a72d..a306ba2ef8 100755
--- a/src/datasets/resource_scripts/openproblems_neurips2021_multimodal.sh
+++ b/src/datasets/resource_scripts/openproblems_neurips2021_multimodal.sh
@@ -35,7 +35,7 @@ output_state: '$id/state.yaml'
publish_dir: s3://openproblems-data/resources/datasets
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_openproblems_neurips2021_bmmc/main.nf \
diff --git a/src/datasets/resource_scripts/openproblems_neurips2022_pbmc.sh b/src/datasets/resource_scripts/openproblems_neurips2022_pbmc.sh
index 56b61ca104..e3e6783a8e 100755
--- a/src/datasets/resource_scripts/openproblems_neurips2022_pbmc.sh
+++ b/src/datasets/resource_scripts/openproblems_neurips2022_pbmc.sh
@@ -46,7 +46,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_openproblems_neurips2022_pbmc/main.nf \
diff --git a/src/datasets/resource_scripts/openproblems_v1.sh b/src/datasets/resource_scripts/openproblems_v1.sh
index 1a01e2120e..8d40e57c46 100755
--- a/src/datasets/resource_scripts/openproblems_v1.sh
+++ b/src/datasets/resource_scripts/openproblems_v1.sh
@@ -171,7 +171,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_openproblems_v1/main.nf \
diff --git a/src/datasets/resource_scripts/openproblems_v1_multimodal.sh b/src/datasets/resource_scripts/openproblems_v1_multimodal.sh
index 3efb960c45..2d516a8ccb 100755
--- a/src/datasets/resource_scripts/openproblems_v1_multimodal.sh
+++ b/src/datasets/resource_scripts/openproblems_v1_multimodal.sh
@@ -74,7 +74,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_openproblems_v1_multimodal/main.nf \
diff --git a/src/datasets/resource_scripts/tenx_visium.sh b/src/datasets/resource_scripts/tenx_visium.sh
index 7993cebd4b..d5b54e7ef5 100755
--- a/src/datasets/resource_scripts/tenx_visium.sh
+++ b/src/datasets/resource_scripts/tenx_visium.sh
@@ -306,7 +306,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision integration_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_tenx_visium/main.nf \
diff --git a/src/datasets/resource_scripts/zenodo_spatial.sh.sh b/src/datasets/resource_scripts/zenodo_spatial.sh.sh
index 192cb8cc9c..7842b4368f 100755
--- a/src/datasets/resource_scripts/zenodo_spatial.sh.sh
+++ b/src/datasets/resource_scripts/zenodo_spatial.sh.sh
@@ -404,7 +404,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_zenodo_spatial/main.nf \
diff --git a/src/datasets/resource_scripts/zenodo_spatial_slidetags.sh b/src/datasets/resource_scripts/zenodo_spatial_slidetags.sh
index d8654ce439..5ab4962240 100755
--- a/src/datasets/resource_scripts/zenodo_spatial_slidetags.sh
+++ b/src/datasets/resource_scripts/zenodo_spatial_slidetags.sh
@@ -72,7 +72,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_zenodo_spatial_slidetags/main.nf \
diff --git a/src/datasets/resource_test_scripts/neurips2021_bmmc.sh b/src/datasets/resource_test_scripts/neurips2021_bmmc.sh
index 7922f634cb..98644d9dbf 100755
--- a/src/datasets/resource_test_scripts/neurips2021_bmmc.sh
+++ b/src/datasets/resource_test_scripts/neurips2021_bmmc.sh
@@ -58,7 +58,7 @@ nextflow run . \
-params-file "$params_file" \
-c src/wf_utils/labels.config
-# tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+# tw launch https://github.com/openproblems-bio/openproblems.git \
# --revision main_build \
# --main-script target/nextflow/datasets/workflows/process_openproblems_neurips2021_bmmc/main.nf \
# --workspace 53907369739130 \
diff --git a/src/datasets/resource_test_scripts/neurips2022_pbmc.sh b/src/datasets/resource_test_scripts/neurips2022_pbmc.sh
index ef2e0523e1..b62e6f40e1 100755
--- a/src/datasets/resource_test_scripts/neurips2022_pbmc.sh
+++ b/src/datasets/resource_test_scripts/neurips2022_pbmc.sh
@@ -60,7 +60,7 @@ process {
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/datasets/workflows/process_openproblems_neurips2022_pbmc/main.nf \
diff --git a/src/tasks/batch_integration/README.md b/src/tasks/batch_integration/README.md
index 7d525e9fc8..073a654508 100644
--- a/src/tasks/batch_integration/README.md
+++ b/src/tasks/batch_integration/README.md
@@ -5,7 +5,7 @@ Remove unwanted batch effects from scRNA data while retaining
biologically meaningful variation.
Path:
-[`src/tasks/batch_integration`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/batch_integration)
+[`src/tasks/batch_integration`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/batch_integration)
## Motivation
@@ -151,7 +151,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/batch_integration`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration)
+[`src/batch_integration`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration)
A label projection dataset processor.
@@ -265,7 +265,7 @@ Slot description:
## Component type: Control method (embedding)
Path:
-[`src/batch_integration/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/control_methods)
+[`src/batch_integration/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/control_methods)
A batch integration embedding control method.
@@ -283,7 +283,7 @@ Arguments:
## Component type: Control method (graph)
Path:
-[`src/batch_integration/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/control_methods)
+[`src/batch_integration/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/control_methods)
A batch integration graph control method.
@@ -301,7 +301,7 @@ Arguments:
## Component type: Method (embedding)
Path:
-[`src/batch_integration/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/methods)
+[`src/batch_integration/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/methods)
A batch integration embedding method.
@@ -319,7 +319,7 @@ Arguments:
## Component type: Method (feature)
Path:
-[`src/batch_integration/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/methods)
+[`src/batch_integration/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/methods)
A batch integration feature method.
@@ -337,7 +337,7 @@ Arguments:
## Component type: Method (graph)
Path:
-[`src/batch_integration/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/methods)
+[`src/batch_integration/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/methods)
A batch integration graph method.
@@ -355,7 +355,7 @@ Arguments:
## Component type: Metric (embedding)
Path:
-[`src/batch_integration/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/metrics)
+[`src/batch_integration/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/metrics)
A batch integration embedding metric.
@@ -374,7 +374,7 @@ Arguments:
## Component type: Metric (feature)
Path:
-[`src/batch_integration/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/metrics)
+[`src/batch_integration/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/metrics)
A batch integration feature metric.
@@ -393,7 +393,7 @@ Arguments:
## Component type: Metric (graph)
Path:
-[`src/batch_integration/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/metrics)
+[`src/batch_integration/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/metrics)
A batch integration graph metric.
@@ -536,7 +536,7 @@ Slot description:
## Component type: Embedding to Graph
Path:
-[`src/batch_integration/transformers`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/transformers)
+[`src/batch_integration/transformers`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/transformers)
Transform an embedding to a graph output.
@@ -554,7 +554,7 @@ Arguments:
## Component type: Feature to Embedding
Path:
-[`src/batch_integration/transformers`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/batch_integration/transformers)
+[`src/batch_integration/transformers`](https://github.com/openproblems-bio/openproblems/tree/main/src/batch_integration/transformers)
Transform a feature output to an embedding.
diff --git a/src/tasks/batch_integration/resources_scripts/process_datasets.sh b/src/tasks/batch_integration/resources_scripts/process_datasets.sh
index 97e6b2c61c..b49c203af8 100755
--- a/src/tasks/batch_integration/resources_scripts/process_datasets.sh
+++ b/src/tasks/batch_integration/resources_scripts/process_datasets.sh
@@ -21,7 +21,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/batch_integration/workflows/process_datasets/main.nf \
diff --git a/src/tasks/batch_integration/resources_scripts/run_benchmark.sh b/src/tasks/batch_integration/resources_scripts/run_benchmark.sh
index f48a5ccdd1..cd83810680 100755
--- a/src/tasks/batch_integration/resources_scripts/run_benchmark.sh
+++ b/src/tasks/batch_integration/resources_scripts/run_benchmark.sh
@@ -10,7 +10,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/batch_integration/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/batch_integration/resources_scripts/run_benchmark_test.sh b/src/tasks/batch_integration/resources_scripts/run_benchmark_test.sh
index b9b80a38ea..eca3049d3a 100755
--- a/src/tasks/batch_integration/resources_scripts/run_benchmark_test.sh
+++ b/src/tasks/batch_integration/resources_scripts/run_benchmark_test.sh
@@ -13,7 +13,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/batch_integration/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/denoising/README.md b/src/tasks/denoising/README.md
index da9d9b1912..5f33715180 100644
--- a/src/tasks/denoising/README.md
+++ b/src/tasks/denoising/README.md
@@ -4,7 +4,7 @@
Removing noise in sparse single-cell RNA-sequencing count data
Path:
-[`src/tasks/denoising`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/denoising)
+[`src/tasks/denoising`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/denoising)
## Motivation
@@ -162,7 +162,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/denoising`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/denoising)
+[`src/denoising`](https://github.com/openproblems-bio/openproblems/tree/main/src/denoising)
A denoising dataset processor.
@@ -242,7 +242,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/denoising/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/denoising/control_methods)
+[`src/denoising/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/denoising/control_methods)
Quality control methods for verifying the pipeline.
@@ -261,7 +261,7 @@ Arguments:
## Component type: Method
Path:
-[`src/denoising/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/denoising/methods)
+[`src/denoising/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/denoising/methods)
A denoising method.
@@ -279,7 +279,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/denoising/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/denoising/metrics)
+[`src/denoising/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/denoising/metrics)
A denoising metric.
diff --git a/src/tasks/denoising/resources_scripts/process_datasets.sh b/src/tasks/denoising/resources_scripts/process_datasets.sh
index 44060a8f66..873b9fb0b4 100755
--- a/src/tasks/denoising/resources_scripts/process_datasets.sh
+++ b/src/tasks/denoising/resources_scripts/process_datasets.sh
@@ -22,7 +22,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/denoising/workflows/process_datasets/main.nf \
diff --git a/src/tasks/denoising/resources_scripts/run_benchmark.sh b/src/tasks/denoising/resources_scripts/run_benchmark.sh
index 983c42cc56..8e38568ac8 100755
--- a/src/tasks/denoising/resources_scripts/run_benchmark.sh
+++ b/src/tasks/denoising/resources_scripts/run_benchmark.sh
@@ -11,7 +11,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/denoising/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/denoising/resources_scripts/run_benchmark_test.sh b/src/tasks/denoising/resources_scripts/run_benchmark_test.sh
index 7f3ecbd3d2..c9023c26f1 100755
--- a/src/tasks/denoising/resources_scripts/run_benchmark_test.sh
+++ b/src/tasks/denoising/resources_scripts/run_benchmark_test.sh
@@ -13,7 +13,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/denoising/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/dimensionality_reduction/README.md b/src/tasks/dimensionality_reduction/README.md
index c18c5dc5ba..c5bc42e09d 100644
--- a/src/tasks/dimensionality_reduction/README.md
+++ b/src/tasks/dimensionality_reduction/README.md
@@ -5,7 +5,7 @@ Reduction of high-dimensional datasets to 2D for visualization &
interpretation
Path:
-[`src/tasks/dimensionality_reduction`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/dimensionality_reduction)
+[`src/tasks/dimensionality_reduction`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/dimensionality_reduction)
## Motivation
@@ -170,7 +170,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/dimensionality_reduction`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/dimensionality_reduction)
+[`src/dimensionality_reduction`](https://github.com/openproblems-bio/openproblems/tree/main/src/dimensionality_reduction)
A dimensionality reduction dataset processor.
@@ -261,7 +261,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/dimensionality_reduction/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/dimensionality_reduction/control_methods)
+[`src/dimensionality_reduction/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/dimensionality_reduction/control_methods)
Quality control methods for verifying the pipeline.
@@ -280,7 +280,7 @@ Arguments:
## Component type: Method
Path:
-[`src/dimensionality_reduction/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/dimensionality_reduction/methods)
+[`src/dimensionality_reduction/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/dimensionality_reduction/methods)
A dimensionality reduction method.
@@ -298,7 +298,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/dimensionality_reduction/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/dimensionality_reduction/metrics)
+[`src/dimensionality_reduction/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/dimensionality_reduction/metrics)
A dimensionality reduction metric.
diff --git a/src/tasks/dimensionality_reduction/resources_scripts/process_datasets.sh b/src/tasks/dimensionality_reduction/resources_scripts/process_datasets.sh
index f83056dad6..11e911edac 100755
--- a/src/tasks/dimensionality_reduction/resources_scripts/process_datasets.sh
+++ b/src/tasks/dimensionality_reduction/resources_scripts/process_datasets.sh
@@ -22,7 +22,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/dimensionality_reduction/workflows/process_datasets/main.nf \
diff --git a/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark.sh b/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark.sh
index 02c58d5cc5..5cf975d3b5 100755
--- a/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark.sh
+++ b/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark.sh
@@ -10,7 +10,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/dimensionality_reduction/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark_test.sh b/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark_test.sh
index 1c778d345c..be6defda0f 100755
--- a/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark_test.sh
+++ b/src/tasks/dimensionality_reduction/resources_scripts/run_benchmark_test.sh
@@ -13,7 +13,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/dimensionality_reduction/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/label_projection/README.md b/src/tasks/label_projection/README.md
index 8981c503be..7694bc0aa6 100644
--- a/src/tasks/label_projection/README.md
+++ b/src/tasks/label_projection/README.md
@@ -4,7 +4,7 @@
Automated cell type annotation from rich, labeled reference data
Path:
-[`src/tasks/label_projection`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/label_projection)
+[`src/tasks/label_projection`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/label_projection)
## Motivation
@@ -119,7 +119,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/label_projection`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/label_projection)
+[`src/label_projection`](https://github.com/openproblems-bio/openproblems/tree/main/src/label_projection)
A label projection dataset processor.
@@ -255,7 +255,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/label_projection/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/label_projection/control_methods)
+[`src/label_projection/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/label_projection/control_methods)
Quality control methods for verifying the pipeline.
@@ -275,7 +275,7 @@ Arguments:
## Component type: Method
Path:
-[`src/label_projection/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/label_projection/methods)
+[`src/label_projection/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/label_projection/methods)
A label projection method.
@@ -294,7 +294,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/label_projection/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/label_projection/metrics)
+[`src/label_projection/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/label_projection/metrics)
A label projection metric.
diff --git a/src/tasks/label_projection/resources_scripts/process_datasets.sh b/src/tasks/label_projection/resources_scripts/process_datasets.sh
index dbd284d237..d5c6353ff5 100755
--- a/src/tasks/label_projection/resources_scripts/process_datasets.sh
+++ b/src/tasks/label_projection/resources_scripts/process_datasets.sh
@@ -22,7 +22,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/label_projection/workflows/process_datasets/main.nf \
diff --git a/src/tasks/label_projection/resources_scripts/run_benchmark.sh b/src/tasks/label_projection/resources_scripts/run_benchmark.sh
index 58a16c38d3..8733e22f52 100755
--- a/src/tasks/label_projection/resources_scripts/run_benchmark.sh
+++ b/src/tasks/label_projection/resources_scripts/run_benchmark.sh
@@ -11,7 +11,7 @@ settings: '{"method_ids": "scanvi_scarches"}'
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/label_projection/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/label_projection/resources_scripts/run_benchmark_test.sh b/src/tasks/label_projection/resources_scripts/run_benchmark_test.sh
index 5baf56f4e4..caf699a384 100755
--- a/src/tasks/label_projection/resources_scripts/run_benchmark_test.sh
+++ b/src/tasks/label_projection/resources_scripts/run_benchmark_test.sh
@@ -13,7 +13,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/label_projection/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/match_modalities/README.md b/src/tasks/match_modalities/README.md
index 399c31ee92..777f367507 100644
--- a/src/tasks/match_modalities/README.md
+++ b/src/tasks/match_modalities/README.md
@@ -5,7 +5,7 @@ Match cells across datasets of the same set of samples on different
technologies / modalities.
Path:
-[`src/tasks/match_modalities`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/match_modalities)
+[`src/tasks/match_modalities`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/match_modalities)
## Motivation
@@ -135,7 +135,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/match_modalities`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/match_modalities)
+[`src/match_modalities`](https://github.com/openproblems-bio/openproblems/tree/main/src/match_modalities)
A match modalities dataset processor.
@@ -303,7 +303,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/match_modalities/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/match_modalities/control_methods)
+[`src/match_modalities/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/match_modalities/control_methods)
A multimodal data integration control method.
@@ -325,7 +325,7 @@ Arguments:
## Component type: Method
Path:
-[`src/match_modalities/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/match_modalities/methods)
+[`src/match_modalities/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/match_modalities/methods)
A multimodal data integration method.
@@ -345,7 +345,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/match_modalities/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/match_modalities/metrics)
+[`src/match_modalities/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/match_modalities/metrics)
A multimodal data integration metric.
diff --git a/src/tasks/match_modalities/resources_scripts/process_datasets.sh b/src/tasks/match_modalities/resources_scripts/process_datasets.sh
index 149130d0cf..e5796bd641 100755
--- a/src/tasks/match_modalities/resources_scripts/process_datasets.sh
+++ b/src/tasks/match_modalities/resources_scripts/process_datasets.sh
@@ -22,7 +22,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/match_modalities/workflows/process_datasets/main.nf \
diff --git a/src/tasks/match_modalities/resources_scripts/run_benchmark.sh b/src/tasks/match_modalities/resources_scripts/run_benchmark.sh
index 001ba3437b..41789c6a0f 100755
--- a/src/tasks/match_modalities/resources_scripts/run_benchmark.sh
+++ b/src/tasks/match_modalities/resources_scripts/run_benchmark.sh
@@ -11,7 +11,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/match_modalities/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/predict_modality/README.md b/src/tasks/predict_modality/README.md
index add96684ce..4b361c52fb 100644
--- a/src/tasks/predict_modality/README.md
+++ b/src/tasks/predict_modality/README.md
@@ -5,7 +5,7 @@ Predicting the profiles of one modality (e.g. protein abundance) from
another (e.g. mRNA expression).
Path:
-[`src/tasks/predict_modality`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/predict_modality)
+[`src/tasks/predict_modality`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/predict_modality)
## Motivation
@@ -131,7 +131,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/predict_modality`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/predict_modality)
+[`src/predict_modality`](https://github.com/openproblems-bio/openproblems/tree/main/src/predict_modality)
A predict modality dataset processor.
@@ -323,7 +323,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/predict_modality/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/predict_modality/control_methods)
+[`src/predict_modality/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/predict_modality/control_methods)
Quality control methods for verifying the pipeline.
@@ -344,7 +344,7 @@ Arguments:
## Component type: Method
Path:
-[`src/predict_modality/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/predict_modality/methods)
+[`src/predict_modality/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/predict_modality/methods)
A regression method.
@@ -364,7 +364,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/predict_modality/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/predict_modality/metrics)
+[`src/predict_modality/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/predict_modality/metrics)
A predict modality metric.
diff --git a/src/tasks/predict_modality/methods/lmds_irlba_rf/config.vsh.yaml b/src/tasks/predict_modality/methods/lmds_irlba_rf/config.vsh.yaml
index ba86f0631e..0ed08b89aa 100644
--- a/src/tasks/predict_modality/methods/lmds_irlba_rf/config.vsh.yaml
+++ b/src/tasks/predict_modality/methods/lmds_irlba_rf/config.vsh.yaml
@@ -7,8 +7,8 @@ functionality:
description: |
A random forest regression using LMDS of modality 1 to predict a PCA embedding of modality 2, which is then reversed to predict the original modality 2.
reference: lance2022multimodal
- documentation_url: https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/predict_modality/methods #/lmds_irlba_rf
- repository_url: https://github.com/openproblems-bio/openproblems-v2
+ documentation_url: https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/predict_modality/methods #/lmds_irlba_rf
+ repository_url: https://github.com/openproblems-bio/openproblems
preferred_normalization: log_cp10k
arguments:
- name: "--distance_method"
diff --git a/src/tasks/predict_modality/resources_scripts/process_datasets.sh b/src/tasks/predict_modality/resources_scripts/process_datasets.sh
index 69d886725f..7be4d548c1 100755
--- a/src/tasks/predict_modality/resources_scripts/process_datasets.sh
+++ b/src/tasks/predict_modality/resources_scripts/process_datasets.sh
@@ -10,7 +10,7 @@ output_state: "$id/state.yaml"
publish_dir: s3://openproblems-data/resources/predict_modality/datasets
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/predict_modality/workflows/process_datasets/main.nf \
diff --git a/src/tasks/predict_modality/resources_scripts/run_benchmark.sh b/src/tasks/predict_modality/resources_scripts/run_benchmark.sh
index 6d4d35219c..941776be43 100755
--- a/src/tasks/predict_modality/resources_scripts/run_benchmark.sh
+++ b/src/tasks/predict_modality/resources_scripts/run_benchmark.sh
@@ -12,7 +12,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/predict_modality/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/spatial_decomposition/README.md b/src/tasks/spatial_decomposition/README.md
index 0b3cfc85d9..d5a8b58751 100644
--- a/src/tasks/spatial_decomposition/README.md
+++ b/src/tasks/spatial_decomposition/README.md
@@ -5,7 +5,7 @@ Estimation of cell type proportions per spot in 2D space from spatial
transcriptomic data coupled with corresponding single-cell data
Path:
-[`src/tasks/spatial_decomposition`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/spatial_decomposition)
+[`src/tasks/spatial_decomposition`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/spatial_decomposition)
## Motivation
@@ -121,7 +121,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/spatial_decomposition`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatial_decomposition)
+[`src/spatial_decomposition`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatial_decomposition)
A spatial decomposition dataset processor.
@@ -239,7 +239,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/spatial_decomposition/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatial_decomposition/control_methods)
+[`src/spatial_decomposition/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatial_decomposition/control_methods)
Quality control methods for verifying the pipeline.
@@ -259,7 +259,7 @@ Arguments:
## Component type: Method
Path:
-[`src/spatial_decomposition/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatial_decomposition/methods)
+[`src/spatial_decomposition/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatial_decomposition/methods)
A spatial composition method.
@@ -278,7 +278,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/spatial_decomposition/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatial_decomposition/metrics)
+[`src/spatial_decomposition/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatial_decomposition/metrics)
A spatial decomposition metric.
diff --git a/src/tasks/spatial_decomposition/resources_scripts/process_datasets.sh b/src/tasks/spatial_decomposition/resources_scripts/process_datasets.sh
index 337aa34512..39ea9604e3 100755
--- a/src/tasks/spatial_decomposition/resources_scripts/process_datasets.sh
+++ b/src/tasks/spatial_decomposition/resources_scripts/process_datasets.sh
@@ -24,7 +24,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/spatial_decomposition/workflows/process_datasets/main.nf \
diff --git a/src/tasks/spatial_decomposition/resources_scripts/run_benchmark.sh b/src/tasks/spatial_decomposition/resources_scripts/run_benchmark.sh
index 85d5e4fb1e..db22b76f3a 100755
--- a/src/tasks/spatial_decomposition/resources_scripts/run_benchmark.sh
+++ b/src/tasks/spatial_decomposition/resources_scripts/run_benchmark.sh
@@ -10,7 +10,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision main_build \
--pull-latest \
--main-script target/nextflow/spatial_decomposition/workflows/run_benchmark/main.nf \
diff --git a/src/tasks/spatially_variable_genes/README.md b/src/tasks/spatially_variable_genes/README.md
index 315c01900a..5e9f43407d 100644
--- a/src/tasks/spatially_variable_genes/README.md
+++ b/src/tasks/spatially_variable_genes/README.md
@@ -11,7 +11,7 @@ significantly across different spatial regions within a tissue or across
cells in a spatially structured context.
Path to source:
-[`src/tasks/spatially_variable_genes`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/tasks/spatially_variable_genes)
+[`src/tasks/spatially_variable_genes`](https://github.com/openproblems-bio/openproblems/tree/main/src/tasks/spatially_variable_genes)
## Motivation
@@ -121,7 +121,7 @@ Slot description:
## Component type: Data processor
Path:
-[`src/spatially_variable_genes`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatially_variable_genes)
+[`src/spatially_variable_genes`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatially_variable_genes)
A spatially variable genes dataset processor.
@@ -216,7 +216,7 @@ Slot description:
## Component type: Control method
Path:
-[`src/spatially_variable_genes/control_methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatially_variable_genes/control_methods)
+[`src/spatially_variable_genes/control_methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatially_variable_genes/control_methods)
Quality control methods for verifying the pipeline.
@@ -235,7 +235,7 @@ Arguments:
## Component type: Method
Path:
-[`src/spatially_variable_genes/methods`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatially_variable_genes/methods)
+[`src/spatially_variable_genes/methods`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatially_variable_genes/methods)
A spatially variable gene identification method.
@@ -253,7 +253,7 @@ Arguments:
## Component type: Metric
Path:
-[`src/spatially_variable_genes/metrics`](https://github.com/openproblems-bio/openproblems-v2/tree/main/src/spatially_variable_genes/metrics)
+[`src/spatially_variable_genes/metrics`](https://github.com/openproblems-bio/openproblems/tree/main/src/spatially_variable_genes/metrics)
A spatially variable genes identification metric.
diff --git a/src/tasks/spatially_variable_genes/resources_scripts/process_datasets.sh b/src/tasks/spatially_variable_genes/resources_scripts/process_datasets.sh
index bdd90f9786..74b18f465c 100755
--- a/src/tasks/spatially_variable_genes/resources_scripts/process_datasets.sh
+++ b/src/tasks/spatially_variable_genes/resources_scripts/process_datasets.sh
@@ -99,7 +99,7 @@ process {
}
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision integration_build \
--pull-latest \
--main-script target/nextflow/spatially_variable_genes/workflows/process_datasets/main.nf \
diff --git a/src/tasks/spatially_variable_genes/resources_scripts/run_benchmark.sh b/src/tasks/spatially_variable_genes/resources_scripts/run_benchmark.sh
index 9539cea4fe..8620bbafe8 100755
--- a/src/tasks/spatially_variable_genes/resources_scripts/run_benchmark.sh
+++ b/src/tasks/spatially_variable_genes/resources_scripts/run_benchmark.sh
@@ -53,7 +53,7 @@ output_state: "state.yaml"
publish_dir: "$publish_dir"
HERE
-tw launch https://github.com/openproblems-bio/openproblems-v2.git \
+tw launch https://github.com/openproblems-bio/openproblems.git \
--revision integration_build \
--pull-latest \
--main-script target/nextflow/spatially_variable_genes/workflows/run_benchmark/main.nf \