diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 977b262..fe18769 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.2.14
+current_version = 0.2.17
 commit = True
 tag = False
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>[a-z0-9+]+)
diff --git a/.gitignore b/.gitignore
index 59a0c10..e4ef844 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,5 @@ __pycache__/
 build/
 venv/
 working/
+data/
+workspace/
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0f7c841..25ce706 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,12 @@
 
 ## dev
 
+- [17](https://github.com/umccr/bolt/pull/17) - Make the DRAGEN HRD input file optional
+
+- [14](https://github.com/umccr/bolt/pull/14) - gpgr version bump to 2.2.0
+
 - [3](https://github.com/scwatts/bolt/pull/3) - Improve PCGR / CPSR argument handling
 
-- [6](https://github.com/umccr/bolt/pull/6) - Change oncoanalyser v2.0.0 uptade, with switch sv caller from GRIPSS to eSVee
\ No newline at end of file
+- [6](https://github.com/umccr/bolt/pull/6) - Update for oncoanalyser v2.0.0, switching the SV caller from GRIPSS to eSVee
+
+- [9](https://github.com/umccr/bolt/pull/9) - Add hypermutated sample handling
\ No newline at end of file
diff --git a/README.md b/README.md
index 323a77f..504b052 100644
--- a/README.md
+++ b/README.md
@@ -54,12 +54,12 @@ the software environment and dependencies. Consequently, dependencies are split
 
 | Name | Docker image URI | Commands |
 | --- | --- | --- |
-| pcgr | ghcr.io/scwatts/bolt:0.2.14-pcgr | • `bolt smlv_germline report`<br />• `bolt smlv_somatic annotate`<br />• `bolt smlv_somatic report` |
-| gpgr | ghcr.io/scwatts/bolt:0.2.14-gpgr | • `bolt other cancer_report` |
-| snpeff | ghcr.io/scwatts/bolt:0.2.14-snpeff | • `bolt sv_somatic annotate` |
-| circos | ghcr.io/scwatts/bolt:0.2.14-circos | • `bolt other purple_baf_plot` |
-| multiqc | ghcr.io/scwatts/bolt:0.2.14-multiqc | • `bolt other multiqc_report` |
-| base | ghcr.io/scwatts/bolt:0.2.14 | • `bolt smlv_germline prepare`<br />• `bolt smlv_somatic rescue`<br />• `bolt smlv_somatic filter`<br />• `bolt sv_somatic prioritise` |
+| pcgr | ghcr.io/scwatts/bolt:0.2.17-pcgr | • `bolt smlv_germline report`<br />• `bolt smlv_somatic annotate`<br />• `bolt smlv_somatic report` |
+| gpgr | ghcr.io/scwatts/bolt:0.2.17-gpgr | • `bolt other cancer_report` |
+| snpeff | ghcr.io/scwatts/bolt:0.2.17-snpeff | • `bolt sv_somatic annotate` |
+| circos | ghcr.io/scwatts/bolt:0.2.17-circos | • `bolt other purple_baf_plot` |
+| multiqc | ghcr.io/scwatts/bolt:0.2.17-multiqc | • `bolt other multiqc_report` |
+| base | ghcr.io/scwatts/bolt:0.2.17 | • `bolt smlv_germline prepare`<br />• `bolt smlv_somatic rescue`<br />• `bolt smlv_somatic filter`<br />• `bolt sv_somatic prioritise`
| ## Usage @@ -67,7 +67,7 @@ Given the nature of software dependencies required, it is strongly recommended t [Docker images](#docker-images): ```bash -docker run -ti -v $(pwd):$(pwd) -w $(pwd) ghcr.io/scwatts/bolt:0.2.14 \ +docker run -ti -v $(pwd):$(pwd) -w $(pwd) ghcr.io/scwatts/bolt:0.2.17 \ bolt smlv_somatic filter \ --tumor_name tumor_sample \ --vcf_fp tumor_sample.vcf.gz \ diff --git a/bolt/common/constants.py b/bolt/common/constants.py index e38ed3d..cfc6aa5 100644 --- a/bolt/common/constants.py +++ b/bolt/common/constants.py @@ -4,7 +4,7 @@ ###################################### ## Variation selection (annotation) ## ###################################### -MAX_SOMATIC_VARIANTS = 500_000 +MAX_SOMATIC_VARIANTS = 450_000 MAX_SOMATIC_VARIANTS_GNOMAD_FILTER = 0.01 @@ -35,10 +35,47 @@ 'pathogenic', 'uncertain_significance', } -PCGR_TIERS_RESCUE = { +PCGR_ACTIONABILITY_TIER_RESCUE = { + '1', + '2', +} + + +################################ +## Hypermutated report filter ## +################################ +PCGR_TIERS_FILTERING = ( 'TIER_1', 'TIER_2', -} + 'TIER_3', + 'TIER_4', + 'NONCODING', +) + +VEP_IMPACTS_FILTER = ( + 'intergenic', + 'intronic', + 'downstream', + 'upstream', + 'impacts_other', +) + +GENOMIC_REGIONS_FILTERING = ( + 'difficult', + 'none', + 'giab_conf', +) + +HOTSPOT_FIELDS_FILTERING = ( + 'SAGE_HOTSPOT', + 'hotspot', + 'PCGR_MUTATION_HOTSPOT', +) + +RETAIN_FIELDS_FILTERING = ( + 'PANEL', + *HOTSPOT_FIELDS_FILTERING, +) ################################################## @@ -61,6 +98,8 @@ class VcfFilter(enum.Enum): ENCODE = 'ENCODE' GNOMAD_COMMON = 'gnomAD_common' + PCGR_COUNT_LIMIT = 'PCGR_count_limit' + @property def namespace(self): return 'FILTER' @@ -77,16 +116,14 @@ class VcfInfo(enum.Enum): SAGE_NOVEL = 'SAGE_NOVEL' SAGE_RESCUE = 'SAGE_RESCUE' - PCGR_TIER = 'PCGR_TIER' + PCGR_ACTIONABILITY_TIER = 'PCGR_ACTIONABILITY_TIER' PCGR_CSQ = 'PCGR_CSQ' PCGR_MUTATION_HOTSPOT = 'PCGR_MUTATION_HOTSPOT' - PCGR_CLINVAR_CLNSIG = 'PCGR_CLINVAR_CLNSIG' + PCGR_CLINVAR_CLASSIFICATION = 'PCGR_CLINVAR_CLASSIFICATION' PCGR_COSMIC_COUNT = 'PCGR_COSMIC_COUNT' PCGR_TCGA_PANCANCER_COUNT = 'PCGR_TCGA_PANCANCER_COUNT' PCGR_ICGC_PCAWG_COUNT = 'PCGR_ICGC_PCAWG_COUNT' - CPSR_FINAL_CLASSIFICATION = 'CPSR_FINAL_CLASSIFICATION' - CPSR_PATHOGENICITY_SCORE = 'CPSR_PATHOGENICITY_SCORE' CPSR_CLINVAR_CLASSIFICATION = 'CPSR_CLINVAR_CLASSIFICATION' CPSR_CSQ = 'CPSR_CSQ' @@ -112,7 +149,7 @@ class VcfInfo(enum.Enum): GNOMAD_AF = 'gnomAD_AF' - PCGR_TIER_RESCUE = 'PCGR_TIER_RESCUE' + PCGR_ACTIONABILITY_TIER_RESCUE = 'PCGR_ACTIONABILITY_TIER_RESCUE' SAGE_HOTSPOT_RESCUE = 'SAGE_HOTSPOT_RESCUE' CLINICAL_POTENTIAL_RESCUE = 'CLINICAL_POTENTIAL_RESCUE' @@ -121,6 +158,8 @@ class VcfInfo(enum.Enum): RESCUED_FILTERS_EXISTING = 'RESCUED_FILTERS_EXISTING' RESCUED_FILTERS_PENDING = 'RESCUED_FILTERS_PENDING' + PANEL = 'PANEL' + @property def namespace(self): return 'INFO' @@ -187,6 +226,9 @@ def namespace(self): 'Description': f'gnomAD AF >= {MAX_GNOMAD_AF}', }, + VcfFilter.PCGR_COUNT_LIMIT: { + 'Description': 'Manually filtered to meet PCGR 500,000 variant limit', + }, # INFO VcfInfo.TUMOR_AF: { @@ -226,7 +268,7 @@ def namespace(self): 'Description': 'Variant rescued by a matching SAGE call', }, - VcfInfo.PCGR_TIER: { + VcfInfo.PCGR_ACTIONABILITY_TIER: { 'Number': '1', 'Type': 'String', 'Description': ( @@ -237,28 +279,29 @@ def namespace(self): }, VcfInfo.PCGR_CSQ: { 'Number': '.', - 'Type': 'String', - 'Description': ( - 'Consequence annotations from Ensembl VEP. 
Format: Allele|Consequence|IMPACT|SYMBOL|' - 'Gene|Feature_type|Feature|BIOTYPE|EXON|INTRON|HGVSc|HGVSp|cDNA_position|' - 'CDS_position|Protein_position|Amino_acids|Codons|Existing_variation|ALLELE_NUM|' - 'DISTANCE|STRAND|FLAGS|PICK|VARIANT_CLASS|SYMBOL_SOURCE|HGNC_ID|CANONICAL|' - 'MANE_SELECT|MANE_PLUS_CLINICAL|TSL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|' - 'UNIPROT_ISOFORM|RefSeq|DOMAINS|HGVS_OFFSET|AF|AFR_AF|AMR_AF|EAS_AF|EUR_AF|SAS_AF|' - 'gnomAD_AF|gnomAD_AFR_AF|gnomAD_AMR_AF|gnomAD_ASJ_AF|gnomAD_EAS_AF|gnomAD_FIN_AF|' - 'gnomAD_NFE_AF|gnomAD_OTH_AF|gnomAD_SAS_AF|CLIN_SIG|SOMATIC|PHENO|CHECK_REF|' - 'NearestExonJB' - ), + 'Type': 'String', + 'Description': ( + 'Consequence annotations from Ensembl VEP. Format: ' + 'Allele|Consequence|IMPACT|SYMBOL|Gene|Feature_type|Feature|BIOTYPE|EXON|INTRON|HGVSc|' + 'HGVSp|cDNA_position|CDS_position|Protein_position|Amino_acids|Codons|Existing_variation|' + 'ALLELE_NUM|DISTANCE|STRAND|FLAGS|PICK|VARIANT_CLASS|SYMBOL_SOURCE|HGNC_ID|CANONICAL|' + 'MANE|MANE_SELECT|MANE_PLUS_CLINICAL|TSL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|' + 'UNIPROT_ISOFORM|RefSeq|DOMAINS|HGVS_OFFSET|gnomADe_AF|gnomADe_AFR_AF|gnomADe_AMR_AF|' + 'gnomADe_ASJ_AF|gnomADe_EAS_AF|gnomADe_FIN_AF|gnomADe_MID_AF|gnomADe_NFE_AF|' + 'gnomADe_REMAINING_AF|gnomADe_SAS_AF|CLIN_SIG|SOMATIC|PHENO|CHECK_REF|MOTIF_NAME|' + 'MOTIF_POS|HIGH_INF_POS|MOTIF_SCORE_CHANGE|TRANSCRIPTION_FACTORS|NearestExonJB|' + 'MaxEntScan_alt|MaxEntScan_diff|MaxEntScan_ref' + ), }, VcfInfo.PCGR_MUTATION_HOTSPOT: { 'Number': '.', 'Type': 'String', - 'Description': 'Known cancer mutation hotspot, as found in cancerhotspots.org_v2, Gene|Codon|Q-value', + 'Description': 'Known cancer mutation hotspot, as found in cancerhotspots.org. Format: GeneSymbol|Entrez_ID|CodonRefAA|Alt_AA|Q-value', }, - VcfInfo.PCGR_CLINVAR_CLNSIG: { + VcfInfo.PCGR_CLINVAR_CLASSIFICATION: { 'Number': '.', 'Type': 'String', - 'Description': 'ClinVar clinical significance', + 'Description': 'ClinVar - Overall clinical significance of variant on a five-tiered scale', }, VcfInfo.PCGR_COSMIC_COUNT: { 'Number': '1', @@ -276,23 +319,10 @@ def namespace(self): 'Description': 'Count of ICGC PCAWG hits', }, - VcfInfo.CPSR_FINAL_CLASSIFICATION: { - 'Number': '1', - 'Type': 'String', - 'Description': ( - 'Final variant classification based on the combination of CLINVAR_CLASSIFICTION (for ' - 'ClinVar-classified variants), and CPSR_CLASSIFICATION (for novel variants)' - ), - }, - VcfInfo.CPSR_PATHOGENICITY_SCORE: { - 'Number': '1', - 'Type': 'Float', - 'Description': 'Aggregated CPSR pathogenicity score', - }, VcfInfo.CPSR_CLINVAR_CLASSIFICATION: { 'Number': '1', 'Type': 'String', - 'Description': 'Clinical significance of variant on a five-tiered scale', + 'Description': 'ClinVar - Overall clinical significance of variant on a five-tiered scale', }, VcfInfo.CPSR_CSQ: { 'Number': '.', @@ -301,13 +331,13 @@ def namespace(self): 'Consequence annotations from Ensembl VEP. 
Format: Allele|Consequence|IMPACT|SYMBOL|' 'Gene|Feature_type|Feature|BIOTYPE|EXON|INTRON|HGVSc|HGVSp|cDNA_position|CDS_position|' 'Protein_position|Amino_acids|Codons|Existing_variation|ALLELE_NUM|DISTANCE|STRAND|' - 'FLAGS|PICK|VARIANT_CLASS|SYMBOL_SOURCE|HGNC_ID|CANONICAL|MANE_SELECT|' - 'MANE_PLUS_CLINICAL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|UNIPROT_ISOFORM|RefSeq|' - 'DOMAINS|HGVS_OFFSET|AF|AFR_AF|AMR_AF|EAS_AF|EUR_AF|SAS_AF|gnomAD_AF|gnomAD_AFR_AF|' - 'gnomAD_AMR_AF|gnomAD_ASJ_AF|gnomAD_EAS_AF|gnomAD_FIN_AF|gnomAD_NFE_AF|gnomAD_OTH_AF|' - 'gnomAD_SAS_AF|CLIN_SIG|SOMATIC|PHENO|CHECK_REF|MOTIF_NAME|MOTIF_POS|HIGH_INF_POS|' - 'MOTIF_SCORE_CHANGE|TRANSCRIPTION_FACTORS|NearestExonJB|LoF|LoF_filter|LoF_flags|' - 'LoF_info' + 'FLAGS|PICK|VARIANT_CLASS|SYMBOL_SOURCE|HGNC_ID|CANONICAL|MANE|MANE_SELECT|' + 'MANE_PLUS_CLINICAL|TSL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|UNIPROT_ISOFORM|RefSeq|' + 'DOMAINS|HGVS_OFFSET|gnomADe_AF|gnomADe_AFR_AF|gnomADe_AMR_AF|gnomADe_ASJ_AF|' + 'gnomADe_EAS_AF|gnomADe_FIN_AF|gnomADe_MID_AF|gnomADe_NFE_AF|gnomADe_REMAINING_AF|' + 'gnomADe_SAS_AF|CLIN_SIG|SOMATIC|PHENO|CHECK_REF|MOTIF_NAME|MOTIF_POS|HIGH_INF_POS|' + 'MOTIF_SCORE_CHANGE|TRANSCRIPTION_FACTORS|NearestExonJB|MaxEntScan_alt|MaxEntScan_diff|' + 'MaxEntScan_ref' ), }, @@ -316,7 +346,7 @@ def namespace(self): 'Type': 'Flag', 'Description': '', }, - VcfInfo.PCGR_TIER_RESCUE: { + VcfInfo.PCGR_ACTIONABILITY_TIER_RESCUE: { 'Number': '0', 'Type': 'Flag', 'Description': '', @@ -350,6 +380,12 @@ def namespace(self): 'Description': 'Filters pending prior to variant rescue', }, + VcfInfo.PANEL: { + 'Number': '0', + 'Type': 'Flag', + 'Description': 'UMCCR somatic panel CDS (2,000 bp padding)', + }, + # FORMAT VcfFormat.SAGE_AD: { diff --git a/bolt/common/pcgr.py b/bolt/common/pcgr.py index fce4c56..95c0f47 100644 --- a/bolt/common/pcgr.py +++ b/bolt/common/pcgr.py @@ -1,9 +1,14 @@ +import collections import csv +import functools +import gzip +import itertools import pathlib import re import shutil import tempfile - +import logging +import concurrent.futures import cyvcf2 @@ -11,6 +16,8 @@ from .. 
import util from ..common import constants +# Use the existing logger configuration +logger = logging.getLogger(__name__) def prepare_vcf_somatic(input_fp, tumor_name, normal_name, output_dir): @@ -110,13 +117,17 @@ def get_minimal_header(input_fh): return '\n'.join([filetype_line, *chrom_lines, *format_lines, column_line]) -def run_somatic(input_fp, pcgr_refdata_dir, output_dir, threads=1, pcgr_conda=None, pcgrr_conda=None, purity=None, ploidy=None, sample_id=None): +def run_somatic(input_fp, pcgr_refdata_dir, vep_dir, output_dir, chunk_nbr=None, threads=1, pcgr_conda=None, pcgrr_conda=None, purity=None, ploidy=None, sample_id=None): + + + output_dir = output_dir / f"pcgr_{chunk_nbr}" if chunk_nbr is not None else output_dir - # NOTE(SW): Nextflow FusionFS v2.2.8 does not support PCGR output to S3; instead write to a - # temporary directory outside of the FusionFS mounted directory then manually copy across + if output_dir.exists(): + logger.warning(f"Output directory '{output_dir}' already exists and will be overwritten") + shutil.rmtree(output_dir) - temp_dir = tempfile.TemporaryDirectory() - pcgr_output_dir = output_dir / 'pcgr/' + # Create output directory + output_dir.mkdir(parents=True, exist_ok=True) if not sample_id: sample_id = 'nosampleset' @@ -124,19 +135,20 @@ def run_somatic(input_fp, pcgr_refdata_dir, output_dir, threads=1, pcgr_conda=No command_args = [ f'--sample_id {sample_id}', f'--input_vcf {input_fp}', + f'--vep_dir {vep_dir}', + f'--refdata_dir {pcgr_refdata_dir}', f'--tumor_dp_tag TUMOR_DP', f'--tumor_af_tag TUMOR_AF', f'--control_dp_tag NORMAL_DP', f'--control_af_tag NORMAL_AF', - f'--pcgr_dir {pcgr_refdata_dir}', f'--genome_assembly grch38', f'--assay WGS', f'--estimate_signatures', - f'--estimate_msi_status', + f'--estimate_msi', f'--estimate_tmb', - f'--show_noncoding', f'--vcfanno_n_proc {threads}', - f'--vep_pick_order biotype,rank,appris,tsl,ccds,canonical,length,mane', + f'--vep_n_forks 4', + f'--vep_pick_order biotype,rank,appris,tsl,ccds,canonical,length,mane_plus_clinical,mane_select', ] # NOTE(SW): VEP pick order is applied as a successive filter: @@ -162,7 +174,7 @@ def run_somatic(input_fp, pcgr_refdata_dir, output_dir, threads=1, pcgr_conda=No command_args.append(f'--tumor_ploidy {ploidy}') # NOTE(SW): placed here to always have output directory last - command_args.append(f'--output_dir {temp_dir.name}') + command_args.append(f'--output_dir {output_dir}') delimiter_padding = ' ' * 10 delimiter = f' \\\n{delimiter_padding}' @@ -171,7 +183,7 @@ def run_somatic(input_fp, pcgr_refdata_dir, output_dir, threads=1, pcgr_conda=No command = fr''' pcgr \ - {command_args_str} + {command_args_str} ''' if pcgr_conda: @@ -179,44 +191,60 @@ def run_somatic(input_fp, pcgr_refdata_dir, output_dir, threads=1, pcgr_conda=No command_formatting = '\n' + ' ' * 4 command = command_formatting + command_conda + command - util.execute_command(command) + # Log file path + log_file_path = output_dir / "run_somatic.log" + + # Run the command and redirect output to the log file + util.execute_command(command, log_file_path=log_file_path) - shutil.copytree(temp_dir.name, pcgr_output_dir) + pcgr_tsv_fp = pathlib.Path(output_dir) / f'{sample_id}.pcgr.grch38.snv_indel_ann.tsv.gz' + pcgr_vcf_fp = pathlib.Path(output_dir) / f'{sample_id}.pcgr.grch38.pass.vcf.gz' - return pcgr_output_dir + # Check if both files exist + if not pcgr_tsv_fp.exists(): + raise FileNotFoundError(f"Expected file {pcgr_tsv_fp} not found.") + if not pcgr_vcf_fp.exists(): + raise FileNotFoundError(f"Expected file 
{pcgr_vcf_fp} not found.") + return pcgr_tsv_fp, pcgr_vcf_fp -def run_germline(input_fp, panel_fp, pcgr_refdata_dir, output_dir, threads=1, pcgr_conda=None, pcgrr_conda=None, sample_id=None): + +def run_germline(input_fp, panel_fp, pcgr_refdata_dir, vep_dir, output_dir, threads=1, pcgr_conda=None, pcgrr_conda=None, sample_id=None): if not sample_id: sample_id = 'nosampleset' - # NOTE(SW): Nextflow FusionFS v2.2.8 does not support PCGR output to S3; instead write to a - # temporary directory outside of the FusionFS mounted directory then manually copy across - temp_dir = tempfile.TemporaryDirectory() cpsr_output_dir = output_dir / 'cpsr/' + if cpsr_output_dir.exists(): + logger.warning(f"Output directory '{cpsr_output_dir}' already exists and will be overwritten") + shutil.rmtree(cpsr_output_dir) + + # Create output directory + cpsr_output_dir.mkdir(parents=True, exist_ok=True) + command_args = [ f'--sample_id {sample_id}', f'--input_vcf {input_fp}', f'--genome_assembly grch38', f'--custom_list {panel_fp}', + f'--vep_dir {vep_dir}', + f'--refdata_dir {pcgr_refdata_dir}', # NOTE(SW): probably useful to add versioning information here; weigh against maintainence # burden f'--custom_list_name umccr_germline_panel', f'--pop_gnomad global', f'--classify_all', - f'--pcgr_dir {pcgr_refdata_dir}', f'--vcfanno_n_proc {threads}', - f'--vep_pick_order biotype,rank,appris,tsl,ccds,canonical,length,mane', + f'--vep_pick_order biotype,rank,appris,tsl,ccds,canonical,length,mane_plus_clinical,mane_select', ] if pcgrr_conda: command_args.append(f'--pcgrr_conda {pcgrr_conda}') # NOTE(SW): placed here to always have output directory last - command_args.append(f'--output_dir {temp_dir.name}') + command_args.append(f'--output_dir {cpsr_output_dir}') delimiter_padding = ' ' * 10 delimiter = f' \\\n{delimiter_padding}' @@ -235,22 +263,21 @@ def run_germline(input_fp, panel_fp, pcgr_refdata_dir, output_dir, threads=1, pc util.execute_command(command) - shutil.copytree(temp_dir.name, cpsr_output_dir) - return cpsr_output_dir -def transfer_annotations_somatic(input_fp, tumor_name, filter_name, pcgr_dir, output_dir): +def transfer_annotations_somatic(input_fp, tumor_name, pcgr_vcf_fp, pcgr_tsv_fp, output_dir): # Set destination INFO field names and source TSV fields info_field_map = { constants.VcfInfo.PCGR_MUTATION_HOTSPOT: 'MUTATION_HOTSPOT', - constants.VcfInfo.PCGR_CLINVAR_CLNSIG: 'CLINVAR_CLNSIG', + constants.VcfInfo.PCGR_CLINVAR_CLASSIFICATION: 'CLINVAR_CLASSIFICATION', constants.VcfInfo.PCGR_TCGA_PANCANCER_COUNT: 'TCGA_PANCANCER_COUNT', constants.VcfInfo.PCGR_CSQ: 'CSQ', } - pcgr_tsv_fp = pathlib.Path(pcgr_dir) / 'nosampleset.pcgr_acmg.grch38.snvs_indels.tiers.tsv' - pcgr_vcf_fp = pathlib.Path(pcgr_dir) / 'nosampleset.pcgr_acmg.grch38.vcf.gz' + # Respect paths provided; do not override + pcgr_tsv_fp = pathlib.Path(pcgr_tsv_fp) + pcgr_vcf_fp = pathlib.Path(pcgr_vcf_fp) # Enforce matching defined and source INFO annotations check_annotation_headers(info_field_map, pcgr_vcf_fp) @@ -261,13 +288,11 @@ def transfer_annotations_somatic(input_fp, tumor_name, filter_name, pcgr_dir, ou # Open filehandles, set required header entries input_fh = cyvcf2.VCF(input_fp) - util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_TIER) + util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_ACTIONABILITY_TIER) util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_CSQ) util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_MUTATION_HOTSPOT) - util.add_vcf_header_entry(input_fh, 
constants.VcfInfo.PCGR_CLINVAR_CLNSIG) - util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_COSMIC_COUNT) + util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_CLINVAR_CLASSIFICATION) util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_TCGA_PANCANCER_COUNT) - util.add_vcf_header_entry(input_fh, constants.VcfInfo.PCGR_ICGC_PCAWG_COUNT) output_fp = output_dir / f'{tumor_name}.annotations.vcf.gz' output_fh = cyvcf2.Writer(output_fp, input_fh, 'wz') @@ -277,25 +302,20 @@ def transfer_annotations_somatic(input_fp, tumor_name, filter_name, pcgr_dir, ou # Do not process chrM since *snvs_indels.tiers.tsv does not include these annotations if record.CHROM == 'chrM': continue - # Immediately print out variants that were not annotated - if filter_name in record.FILTERS: - output_fh.write_record(record) - continue # Annotate and write - record_ann = annotate_record(record, pcgr_data) + record_ann = annotate_record(record, pcgr_data, allow_missing=True) output_fh.write_record(record_ann) def transfer_annotations_germline(input_fp, normal_name, cpsr_dir, output_dir): # Set destination INFO field names and source TSV fields + # Note: Only include fields that exist in CPSR v2.2.1 output info_field_map = { - constants.VcfInfo.CPSR_FINAL_CLASSIFICATION: 'FINAL_CLASSIFICATION', - constants.VcfInfo.CPSR_PATHOGENICITY_SCORE: 'CPSR_PATHOGENICITY_SCORE', constants.VcfInfo.CPSR_CLINVAR_CLASSIFICATION: 'CLINVAR_CLASSIFICATION', constants.VcfInfo.CPSR_CSQ: 'CSQ', } - cpsr_tsv_fp = pathlib.Path(cpsr_dir) / f'{normal_name}.cpsr.grch38.snvs_indels.tiers.tsv' + cpsr_tsv_fp = pathlib.Path(cpsr_dir) / f'{normal_name}.cpsr.grch38.classification.tsv.gz' cpsr_vcf_fp = pathlib.Path(cpsr_dir) / f'{normal_name}.cpsr.grch38.vcf.gz' # Enforce matching defined and source INFO annotations @@ -307,8 +327,6 @@ def transfer_annotations_germline(input_fp, normal_name, cpsr_dir, output_dir): # Open filehandles, set required header entries input_fh = cyvcf2.VCF(input_fp) - util.add_vcf_header_entry(input_fh, constants.VcfInfo.CPSR_FINAL_CLASSIFICATION) - util.add_vcf_header_entry(input_fh, constants.VcfInfo.CPSR_PATHOGENICITY_SCORE) util.add_vcf_header_entry(input_fh, constants.VcfInfo.CPSR_CLINVAR_CLASSIFICATION) util.add_vcf_header_entry(input_fh, constants.VcfInfo.CPSR_CSQ) @@ -330,49 +348,54 @@ def check_annotation_headers(info_field_map, vcf_fp): # Ensure header descriptions from source INFO annotations match those defined here for the # output file; force manual inspection where they do not match vcf_fh = cyvcf2.VCF(vcf_fp) + mismatches = list() for header_dst, header_src in info_field_map.items(): # Skip header lines that do not have an equivalent entry in the PCGR/CPSR VCF try: header_src_entry = vcf_fh.get_header_type(header_src) except KeyError: + print(f"Missing in VCF: {header_src}") continue header_dst_entry = util.get_vcf_header_entry(header_dst) # Remove leading and trailing quotes from source header_src_description_unquoted = header_src_entry['Description'].strip('"') - assert header_src_description_unquoted == header_dst_entry['Description'] + if header_src_description_unquoted != header_dst_entry['Description']: + mismatches.append( + f"Mismatch for {header_src}:\n" + f"VCF: {header_src_description_unquoted}\n" + f"Expected: {header_dst_entry['Description']}" + ) + if mismatches: + mismatch_msg = "\n\n".join(mismatches) + raise AssertionError(f"Mismatched INFO annotations:\n{mismatch_msg}") def collect_pcgr_annotation_data(tsv_fp, vcf_fp, info_field_map): # Gather all annotations from TSV data_tsv 
= dict() - with open(tsv_fp, 'r') as tsv_fh: + # Read gz or plain text based on extension + open_fn = gzip.open if str(tsv_fp).endswith('.gz') else open + with open_fn(tsv_fp, 'rt') as tsv_fh: for record in csv.DictReader(tsv_fh, delimiter='\t'): key, record_ann = get_annotation_entry_tsv(record, info_field_map) assert key not in data_tsv - # Process PCGR_TIER - # TIER_1, TIER_2, TIER_3, TIER_4, NONCODING - record_ann[constants.VcfInfo.PCGR_TIER] = record['TIER'].replace(' ', '_') - - # Count COSMIC hits - if record['COSMIC_MUTATION_ID'] == 'NA': - cosmic_count = 0 + # Normalize PCGR actionability tier to simple values: '1','2','3','4','N' + raw_tier = (record.get('ACTIONABILITY_TIER') or '').strip() + tier_norm = raw_tier.replace('_', ' ').upper() + if tier_norm in ('TIER 1','TIER1','1'): + tier_val = '1' + elif tier_norm in ('TIER 2','TIER2','2'): + tier_val = '2' + elif tier_norm in ('TIER 3','TIER3','3'): + tier_val = '3' + elif tier_norm in ('TIER 4','TIER4','4'): + tier_val = '4' else: - cosmic_count = len(record['COSMIC_MUTATION_ID'].split('&')) - record_ann[constants.VcfInfo.PCGR_COSMIC_COUNT] = cosmic_count - - # Count ICGC-PCAWG hits by taking sum of affected donors where the annotation value has - # the following format: project_code|tumor_type|affected_donors|tested_donors|frequency - icgc_pcawg_count = 0 - if record['ICGC_PCAWG_OCCURRENCE'] != 'NA': - for pcawg_hit_data in record['ICGC_PCAWG_OCCURRENCE'].split(','): - pcawrg_hit_data_fields = pcawg_hit_data.split('|') - affected_donors = int(pcawrg_hit_data_fields[2]) - icgc_pcawg_count += affected_donors - assert icgc_pcawg_count > 0 - record_ann[constants.VcfInfo.PCGR_ICGC_PCAWG_COUNT] = icgc_pcawg_count + tier_val = 'N' + record_ann[constants.VcfInfo.PCGR_ACTIONABILITY_TIER] = tier_val # Store annotation data data_tsv[key] = record_ann @@ -388,7 +411,7 @@ def collect_cpsr_annotation_data(tsv_fp, vcf_fp, info_field_map): # Gather annotations from TSV data_tsv = dict() gdot_re = re.compile('^(?P[\dXYM]+):g\.(?P\d+)(?P[A-Z]+)>(?P[A-Z]+)$') - with open(tsv_fp, 'r') as tsv_fh: + with gzip.open(tsv_fp, 'rt') as tsv_fh: for record in csv.DictReader(tsv_fh, delimiter='\t'): # Decompose CPSR 'GENOMIC_CHANGE' field into CHROM, POS, REF, and ALT re_result = gdot_re.match(record['GENOMIC_CHANGE']) @@ -410,6 +433,24 @@ def collect_cpsr_annotation_data(tsv_fp, vcf_fp, info_field_map): # Compile annotations, prefering TSV source return compile_annotation_data(data_tsv, data_vcf) +def parse_genomic_change(genomic_change): + """ + Parse a genomic change string, e.g., "3:g.41224645T>C" + Returns a tuple: (chrom, pos, ref, alt) + """ + # Regular expression for the format "chrom:g.posRef>Alt" + pattern = r'^(?P\w+):g\.(?P\d+)(?P\w+)>(?P\w+)$' + match = re.match(pattern, genomic_change) + if not match: + raise ValueError(f"Format not recognized: {genomic_change}") + + # Get values and format as needed + chrom = f"chr{match.group('chrom')}" + pos = int(match.group('pos')) + ref = match.group('ref') + alt = match.group('alt') + return chrom, pos, ref, alt + def get_annotations_vcf(vcf_fp, info_field_map): data_vcf = dict() @@ -429,10 +470,20 @@ def get_annotations_vcf(vcf_fp, info_field_map): def get_annotation_entry_tsv(record, info_field_map): - # Set lookup key; PCGR/CPSR strips leading 'chr' from contig names - chrom = f'chr{record["CHROM"]}' - pos = int(record['POS']) - key = (chrom, pos, record['REF'], record['ALT']) + # If GENOMIC_CHANGE is present, parse it; otherwise, fallback to CHROM/POS/REF/ALT fields + chrom = pos = ref = alt = None 
+ if 'GENOMIC_CHANGE' in record and record['GENOMIC_CHANGE']: + chrom, pos, ref, alt = parse_genomic_change(record['GENOMIC_CHANGE']) + else: + chrom = record.get('CHROM') or record.get('Chromosome') + pos = int(record.get('POS') or record.get('Start_position')) + ref = record.get('REF') + alt = record.get('ALT') + # Ensure chrom has 'chr' prefix + if chrom and not str(chrom).startswith('chr'): + chrom = f'chr{chrom}' + + key = (chrom, pos, ref, alt) record_ann = dict() for info_dst, info_src in info_field_map.items(): @@ -481,3 +532,198 @@ def annotate_record(record, annotations, *, allow_missing=False): record.INFO[info_enum.value] = v return record + +def split_vcf(input_vcf, output_dir, *, max_variants=None): + """ + Splits a VCF file into multiple chunks, each containing up to max_variants variants. + Each chunk includes the VCF header. + Ensures no overlapping positions between chunks. + """ + if max_variants is None: + max_variants = constants.MAX_SOMATIC_VARIANTS + elif max_variants <= 0: + raise ValueError("max_variants must be a positive integer.") + + output_dir = pathlib.Path(output_dir / "vcf_chunks") + output_dir.mkdir(parents=True, exist_ok=True) + chunk_files = [] + chunk_number = 1 + variant_count = 0 + input_vcf = pathlib.Path(input_vcf) + base_filename = input_vcf.stem + chunk_filename = output_dir / f"{base_filename}_chunk{chunk_number}.vcf" + chunk_files.append(chunk_filename) + # Open the input VCF using cyvcf2 + vcf_in = cyvcf2.VCF(input_vcf) + # Create a new VCF file for the first chunk + vcf_out = cyvcf2.Writer(str(chunk_filename), vcf_in) + last_position = None + for record in vcf_in: + current_position = record.POS + # Check if we need to start a new chunk + if variant_count >= max_variants and (last_position is None or current_position != last_position): + # Close the current chunk file and start a new one + vcf_out.close() + chunk_number += 1 + chunk_filename = output_dir / f"{base_filename}_chunk{chunk_number}.vcf" + chunk_files.append(chunk_filename) + vcf_out = cyvcf2.Writer(str(chunk_filename), vcf_in) + variant_count = 0 + # Write the record to the current chunk + vcf_out.write_record(record) + variant_count += 1 + last_position = current_position + # Close the last chunk file + vcf_out.close() + vcf_in.close() + logger.info(f"VCF file split into {len(chunk_files)} chunks.") + return chunk_files + +def run_somatic_chunck(vcf_chunks, pcgr_data_dir, vep_dir, output_dir, pcgr_output_dir, max_threads, pcgr_conda, pcgrr_conda): + pcgr_tsv_files = [] + pcgr_vcf_files = [] + num_chunks = len(vcf_chunks) + # Ensure we don't use more workers than available threads, and each worker has at least 2 threads + max_workers = min(num_chunks, max_threads // 2) + threads_quot, threads_rem = divmod(max_threads, num_chunks) + threads_per_chunk = max(2, threads_quot) + # Limit the number of workers to the smaller of num_chunks or max_threads // 2 + with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: + futures = {} + for chunk_number, vcf_file in enumerate(vcf_chunks, start=1): + # Assign extra thread to the first 'threads_rem' chunks + additional_thread = 1 if chunk_number <= threads_rem else 0 + total_threads = threads_per_chunk + additional_thread + futures[executor.submit(run_somatic, vcf_file, pcgr_data_dir, vep_dir, pcgr_output_dir, chunk_number, total_threads, pcgr_conda, pcgrr_conda)] = chunk_number + for future in concurrent.futures.as_completed(futures): + try: + pcgr_tsv_fp, pcgr_vcf_fp = future.result() + if pcgr_tsv_fp: + 
pcgr_tsv_files.append(pcgr_tsv_fp) + if pcgr_vcf_fp: + pcgr_vcf_files.append(pcgr_vcf_fp) + except Exception: + chunk_number = futures[future] + logger.exception(f"Exception occurred while processing PCGR chunk {chunk_number}.") + merged_vcf_fp, merged_tsv_fp = merging_pcgr_files(output_dir, pcgr_vcf_files, pcgr_tsv_files) + return merged_tsv_fp, merged_vcf_fp + +def merging_pcgr_files(output_dir, pcgr_vcf_files, pcgr_tsv_fp): + pcgr_dir = pathlib.Path(output_dir) / 'pcgr' + pcgr_dir.mkdir(exist_ok=True) + + # Merge all TSV files into a single file in the pcgr directory + merged_tsv_fp = pcgr_dir / "nosampleset.pcgr_acmg.grch38.snvs_indels.tiers.tsv.gz" + util.merge_tsv_files(pcgr_tsv_fp, merged_tsv_fp) + + # Step 5: Merge all VCF files into a single file in the pcgr directory + merged_vcf_path = pcgr_dir / "nosampleset.pcgr.grch38.pass" + merged_vcf = util.merge_vcf_files(pcgr_vcf_files, merged_vcf_path) + + return merged_vcf, merged_tsv_fp + + +def get_variant_filter_data(variant): + attribute_names = ( + 'tier', + 'difficult', + 'giab_conf', + 'intergenic', + 'intronic', + 'downstream', + 'upstream', + 'impacts_other', + ) + + data = {e: None for e in attribute_names} + + + data['tier'] = variant.INFO.get('PCGR_ACTIONABILITY_TIER') + + + info_keys = [k for k, v in variant.INFO] + + data['difficult'] = any(e.startswith('DIFFICULT') for e in info_keys) + data['giab_conf'] = 'GIAB_CONF' in info_keys + + + # NOTE(SW): GIAB_CONF always overrides DIFFICULT tags + if data['giab_conf'] and data['difficult']: + data['difficult'] = False + + + for impact in get_impacts(variant.INFO['PCGR_CSQ']): + if impact == 'intergenic_variant': + data['intergenic'] = True + elif impact == 'intron_variant': + data['intronic'] = True + elif impact == 'downstream_gene_variant': + data['downstream'] = True + elif impact == 'upstream_gene_variant': + data['upstream'] = True + elif impact: + data['impacts_other'] = True + else: + assert False + + return data + + +def get_impacts(csq_str_full): + impacts = set() + for csq_str in csq_str_full.split(','): + csq_tokens = csq_str.split('|') + impact_str = csq_tokens[1] + impacts.update(impact_str.split('&')) + return impacts + + +def determine_filter(data): + + for impact, region in get_ordering(tiers=False): + + # NOTE(SW): this is less efficient than nested loops since the outer block is reevaluated + # within what would be the inner loop each cycle; taking this route for cleaner code + + impacts_higher = get_impacts_higher(impact) + impact_filter = bool(data[impact]) and not any(bool(data[e]) for e in impacts_higher) + + region_filter = False + if region == 'none': + region_filter = not (data['difficult'] or data['giab_conf']) + else: + region_filter = data[region] + + if impact_filter and region_filter: + return (impact, region) + + return False + + +def get_variant_repr(variant): + return (variant.CHROM, variant.POS, variant.REF, tuple(variant.ALT)) + + +@functools.cache +def get_ordering(tiers=True, impacts=True, regions=True): + categories = [ + constants.PCGR_TIERS_FILTERING if tiers else None, + constants.VEP_IMPACTS_FILTER if impacts else None, + constants.GENOMIC_REGIONS_FILTERING if regions else None, + ] + + # NOTE(SW): I'm not aware of any noncoding impacts for TIER_[1-4] other than TERT but keeping + # in to be overly cautious + ordering_iter = itertools.product(*(c for c in categories if c)) + + return tuple(ordering_iter) + + +@functools.cache +def get_impacts_higher(impact): + impact_index = constants.VEP_IMPACTS_FILTER.index(impact) + if impact_index 
+ 1 < len(constants.VEP_IMPACTS_FILTER): + impacts_higher = constants.VEP_IMPACTS_FILTER[impact_index+1:len(constants.VEP_IMPACTS_FILTER)] + else: + impacts_higher = list() + return impacts_higher diff --git a/bolt/logging_config.py b/bolt/logging_config.py new file mode 100644 index 0000000..0ddda2b --- /dev/null +++ b/bolt/logging_config.py @@ -0,0 +1,34 @@ +import logging +import sys +import pathlib +from datetime import datetime + +class IgnoreTinfoFilter(logging.Filter): + def filter(self, record): + # Exclude messages that contain the unwanted text. + if "no version information available" in record.getMessage(): + return False + return True + +def setup_logging(output_dir, script_name): + # Create a timestamp for the log file + timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + log_filename = f"{script_name}_{timestamp}.log" + log_file = pathlib.Path(output_dir) / log_filename + + # Create individual handlers. + console_handler = logging.StreamHandler(sys.stdout) + file_handler = logging.FileHandler(log_file) + + # Instantiate and attach the filter to both handlers. + tinfo_filter = IgnoreTinfoFilter() + console_handler.addFilter(tinfo_filter) + file_handler.addFilter(tinfo_filter) + + logging.basicConfig( + level=logging.DEBUG, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[file_handler, console_handler] + ) + logger = logging.getLogger(__name__) + logger.info("Logging setup complete") \ No newline at end of file diff --git a/bolt/util.py b/bolt/util.py index b7333d9..8065302 100644 --- a/bolt/util.py +++ b/bolt/util.py @@ -1,11 +1,16 @@ +import gzip import pathlib import subprocess -import sys import textwrap +import logging +from types import SimpleNamespace +import cyvcf2 from .common import constants +# Set up logging +logger = logging.getLogger(__name__) # TODO(SW): create note that number this assumes location of `//` def get_project_root(): @@ -15,46 +20,58 @@ def get_project_root(): return project_root -def execute_command(command): - command_prepared = command_prepare(command) +def execute_command(command, log_file_path=None): + logger.info("Executing command: %s", command.strip()) - print(command_prepared) + # Open the log file if provided + log_file = log_file_path.open('a', encoding='utf-8') if log_file_path else None - process = subprocess.run( - command_prepared, + # Launch process with combined stdout and stderr streams, and line buffering enabled. 
+ process = subprocess.Popen( + command, shell=True, executable='/bin/bash', - capture_output=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, encoding='utf-8', + bufsize=1 # line buffered ) - if process.returncode != 0: - print(process) - print(process.stderr) - sys.exit(1) - - return process + output_lines = [] + # Iterate over each line as it becomes available + with process.stdout: + for line in iter(process.stdout.readline, ''): + if line: + logger.info(line.strip()) + output_lines.append(line) + if log_file: + log_file.write(line) + log_file.flush() # flush immediately for real-time logging + process.wait() # wait for the process to complete + + if log_file: + log_file.close() + + result = SimpleNamespace( + stdout=''.join(output_lines), + returncode=process.returncode, + pid=process.pid, + command=command + ) + return result def command_prepare(command): return f'set -o pipefail; {textwrap.dedent(command)}' - -#def count_vcf_records(fp, exclude_args=None): -# args = list() -# if exclude_args: -# args.append(f'-e \'{exclude_args}\'') -# -# args_str = ' '.join(args) -# command = f'bcftools view -H {args_str} {fp} | wc -l' -# -# result = execute_command(command) -# return int(result.stdout) - - def count_vcf_records(fp): - result = execute_command(f'bcftools view -H {fp} | wc -l') - return int(result.stdout) + result = subprocess.run(f'bcftools view -H {fp} | wc -l', + shell=True, + executable="/bin/bash", + capture_output=True, + text=True ) + return int(result.stdout.strip()) def add_vcf_header_entry(fh, anno_enum): @@ -94,8 +111,141 @@ def get_qualified_vcf_annotation(anno_enum): assert anno_enum in constants.VcfInfo or anno_enum in constants.VcfFormat return f'{anno_enum.namespace}/{anno_enum.value}' - -#def add_vcf_filter(record, filter_enum): -# existing_filters = [e for e in record.FILTERS if e != 'PASS'] -# assert filter_enum.value not in existing_filters -# return ';'.join([*existing_filters, filter_enum.value]) +def split_vcf(input_vcf, output_dir): + """ + Splits a VCF file into multiple chunks, each containing up to max_variants variants. + Each chunk includes the VCF header. + Ensures no overlapping positions between chunks. 
+ """ + output_dir = pathlib.Path(output_dir / "vcf_chunks") + output_dir.mkdir(parents=True, exist_ok=True) + + chunk_files = [] + chunk_number = 1 + variant_count = 0 + input_vcf = pathlib.Path(input_vcf) + base_filename = input_vcf.stem + chunk_filename = output_dir / f"{base_filename}_chunk{chunk_number}.vcf" + chunk_files.append(chunk_filename) + + # Open the input VCF using cyvcf2 + vcf_in = cyvcf2.VCF(input_vcf) + # Create a new VCF file for the first chunk + vcf_out = cyvcf2.Writer(str(chunk_filename), vcf_in) + + last_position = None + + for record in vcf_in: + current_position = record.POS + # Check if we need to start a new chunk + if variant_count >= constants.MAX_SOMATIC_VARIANTS and (last_position is None or current_position != last_position): + # Close the current chunk file and start a new one + vcf_out.close() + chunk_number += 1 + chunk_filename = output_dir / f"{base_filename}_chunk{chunk_number}.vcf" + chunk_files.append(chunk_filename) + vcf_out = cyvcf2.Writer(str(chunk_filename), vcf_in) + variant_count = 0 + + # Write the record to the current chunk + vcf_out.write_record(record) + variant_count += 1 + last_position = current_position + + # Close the last chunk file + vcf_out.close() + vcf_in.close() + + logger.info(f"VCF file split into {len(chunk_files)} chunks.") + + return chunk_files + +def merge_tsv_files(tsv_files, merged_tsv_fp): + """ + Merge gzipped TSV files into a single gzipped TSV. + """ + + with gzip.open(merged_tsv_fp, 'wt', encoding='utf-8') as merged_tsv: + for i, tsv_file in enumerate(tsv_files): + with gzip.open(tsv_file, 'rt', encoding='utf-8') as infile: + for line_number, line in enumerate(infile): + # Skip header except for the first file + if i > 0 and line_number == 0: + continue + merged_tsv.write(line) + logger.info(f"Merged TSV written to: {merged_tsv_fp}") + + +def merge_vcf_files(vcf_files, merged_vcf_fp): + """ + Merges multiple VCF files into a single sorted VCF file using bcftools. + + Parameters: + - vcf_files: List of paths to VCF files to be merged. + - merged_vcf_fp: Path to the output merged VCF file (without extension). + + Returns: + - Path to the sorted merged VCF file. 
+ """ + merged_vcf_fp = pathlib.Path(merged_vcf_fp) + merged_unsorted_vcf = merged_vcf_fp.with_suffix('.unsorted.vcf.gz') + merged_vcf = merged_vcf_fp.with_suffix('.vcf.gz') + + # Prepare the bcftools merge command arguments + command_args = [ + 'bcftools merge', + '-m all', + '-Oz', + f'-o {merged_unsorted_vcf}', + ] + [str(vcf_file) for vcf_file in vcf_files] + + # Format the command for readability + delimiter_padding = ' ' * 10 + delimiter = f' \\\n{delimiter_padding}' + command_args_str = delimiter.join(command_args) + + command = f''' + {command_args_str} + ''' + + # Run the bcftools merge command + logger.info("Running bcftools merge...") + execute_command(command) + logger.info(f"Merged VCF written to: {merged_unsorted_vcf}") + + # Sort the merged VCF file + sort_command_args = [ + 'bcftools sort', + '-Oz', + f'-o {merged_vcf}', + f'{merged_unsorted_vcf}' + ] + sort_command_args_str = delimiter.join(sort_command_args) + sort_command = f''' + {sort_command_args_str} + ''' + + logger.info("Sorting merged VCF file...") + execute_command(sort_command) + logger.info(f"Sorted merged VCF written to: {merged_vcf}") + + # Index the sorted merged VCF file + index_command_args = [ + 'bcftools index', + '-t', + f'{merged_vcf}' + ] + index_command_args_str = delimiter.join(index_command_args) + index_command = f''' + {index_command_args_str} + ''' + + logger.info("Indexing sorted merged VCF file...") + execute_command(index_command) + logger.info(f"Indexed merged VCF file: {merged_vcf}.tbi") + + # Optionally, remove the unsorted merged VCF file + if merged_unsorted_vcf.exists(): + merged_unsorted_vcf.unlink() + + return merged_vcf diff --git a/bolt/workflows/other/cancer_report.py b/bolt/workflows/other/cancer_report.py index 0799cd1..9ad3d93 100644 --- a/bolt/workflows/other/cancer_report.py +++ b/bolt/workflows/other/cancer_report.py @@ -5,6 +5,9 @@ from ... 
import util +from ...logging_config import setup_logging + + @click.command(name='cancer_report') @@ -28,8 +31,12 @@ @click.option('--purple_dir', required=True, type=click.Path(exists=True)) @click.option('--virusbreakend_dir', required=True, type=click.Path(exists=True)) +@click.option('--mutpat_dir', required=True, type=click.Path(exists=True)) + +@click.option('--hrdetect_file', required=True, type=click.Path(exists=True)) +@click.option('--chord_file', required=True, type=click.Path(exists=True)) -@click.option('--dragen_hrd_fp', required=True, type=click.Path(exists=True)) +@click.option('--dragen_hrd_fp', required=False, type=click.Path(exists=True)) @click.option('--cancer_genes_fp', required=True, type=click.Path(exists=True)) @click.option('--oncokb_genes_fp', required=True, type=click.Path(exists=True)) @@ -44,6 +51,9 @@ def entry(ctx, **kwargs): output_dir = pathlib.Path(kwargs['output_dir']) output_dir.mkdir(mode=0o755, parents=True, exist_ok=True) + script_name = pathlib.Path(__file__).stem + setup_logging(output_dir, script_name) + # Normalise SAGE variants and remove duplicates that arise for MutationalPattern compatibility decomposed_snv_vcf = normalise_and_dedup_sage_variants( kwargs['smlv_somatic_vcf_fp'], @@ -59,9 +69,14 @@ def entry(ctx, **kwargs): ) # Set other required argument values - batch_name = f'{kwargs["subject_name"]}_{kwargs["tumor_name"]}' + batch_name = f'{kwargs['subject_name']}_{kwargs['tumor_name']}' output_table_dir = output_dir / 'cancer_report_tables' + # Optional dragen hrd argument + dragen_hrd_arg = '' + if kwargs['dragen_hrd_fp']: + dragen_hrd_arg = f"--dragen_hrd {kwargs['dragen_hrd_fp']}" + # Run gpgr canrep command = fr''' gpgr.R canrep \ @@ -88,15 +103,19 @@ def entry(ctx, **kwargs): --virusbreakend_tsv {kwargs['virusbreakend_dir']}/{kwargs['tumor_name']}.virusbreakend.vcf.summary.tsv \ --virusbreakend_vcf {kwargs['virusbreakend_dir']}/{kwargs['tumor_name']}.virusbreakend.vcf \ \ - --dragen_hrd {kwargs['dragen_hrd_fp']} \ + {dragen_hrd_arg} \ --bcftools_stats {kwargs['smlv_somatic_bcftools_stats_fp']} \ \ --key_genes {kwargs['cancer_genes_fp']} \ --oncokb_genes {kwargs['oncokb_genes_fp']} \ \ + --mutpat_dir {kwargs['mutpat_dir']} \ + --hrdetect_file {kwargs['hrdetect_file']} \ + --chord_file {kwargs['chord_file']} \ + \ --img_dir {output_image_dir}/ \ --result_outdir {output_table_dir}/ \ - --out_file {output_dir}/{kwargs["tumor_name"]}.cancer_report.html + --out_file {output_dir}/{kwargs['tumor_name']}.cancer_report.html ''' util.execute_command(command) diff --git a/bolt/workflows/smlv_germline/report.py b/bolt/workflows/smlv_germline/report.py index 0742c19..395268f 100644 --- a/bolt/workflows/smlv_germline/report.py +++ b/bolt/workflows/smlv_germline/report.py @@ -1,5 +1,6 @@ import pathlib import yaml +from ...logging_config import setup_logging import click @@ -22,6 +23,7 @@ @click.option('--germline_panel_list_fp', required=True, type=click.Path(exists=True)) @click.option('--pcgr_data_dir', required=True, type=click.Path(exists=True)) +@click.option('--vep_dir', required=True, type=click.Path(exists=True)) @click.option('--threads', required=True, type=int, default=1) @@ -35,6 +37,9 @@ def entry(ctx, **kwargs): output_dir = pathlib.Path(kwargs['output_dir']) output_dir.mkdir(mode=0o755, parents=True, exist_ok=True) + # Set up logging + script_name = pathlib.Path(__file__).stem + setup_logging(output_dir, script_name) # BCFtools stats run_bcftool_stats(kwargs['vcf_unfiltered_fp'], kwargs['normal_name'], output_dir) @@ -69,6 +74,7 @@ 
def entry(ctx, **kwargs): cpsr_prep_fp, kwargs['germline_panel_list_fp'], kwargs['pcgr_data_dir'], + kwargs['vep_dir'], output_dir, threads=kwargs['threads'], pcgr_conda=kwargs['pcgr_conda'], diff --git a/bolt/workflows/smlv_somatic/annotate.py b/bolt/workflows/smlv_somatic/annotate.py index f163d5f..32b85b3 100644 --- a/bolt/workflows/smlv_somatic/annotate.py +++ b/bolt/workflows/smlv_somatic/annotate.py @@ -1,14 +1,13 @@ +import logging import pathlib - - import click import cyvcf2 - from ... import util from ...common import constants from ...common import pcgr - +from ...logging_config import setup_logging +logger = logging.getLogger(__name__) @click.command(name='annotate') @click.pass_context @@ -23,6 +22,7 @@ @click.option('--pon_dir', required=True, type=click.Path(exists=True)) @click.option('--pcgr_data_dir', required=True, type=click.Path(exists=True)) +@click.option('--vep_dir', required=True, type=click.Path(exists=True)) @click.option('--pcgr_conda', required=False, type=str) @click.option('--pcgrr_conda', required=False, type=str) @@ -30,6 +30,7 @@ @click.option('--threads', required=False, default=4, type=int) @click.option('--output_dir', required=True, type=click.Path()) +@click.option('--pcgr_variant_chunk_size', required=False, type=int, help='Override maximum variants per PCGR chunk.') def entry(ctx, **kwargs): '''Annotate variants with information from several sources\f @@ -44,6 +45,9 @@ def entry(ctx, **kwargs): output_dir = pathlib.Path(kwargs['output_dir']) output_dir.mkdir(mode=0o755, parents=True, exist_ok=True) + script_name = pathlib.Path(__file__).stem + setup_logging(output_dir, script_name) + # Set all FILTER="." to FILTER="PASS" as required by PURPLE filter_pass_fp = set_filter_pass(kwargs['vcf_fp'], kwargs['tumor_name'], output_dir) @@ -73,55 +77,67 @@ def entry(ctx, **kwargs): ) # Annotate with cancer-related and functional information from a range of sources using PCGR - # - Select variants to process - there is an upper limit for PCGR of around 500k # - Set tumor and normal AF and DP in INFO for PCGR and remove all other annotations # - Run PCGR on minimal VCF (pcgr_prep_fp) # - Transfer selected PCGR annotations to unfiltered VCF (selected_fp) # - PCGR ACMG TIER [INFO/PCGR_TIER] # - VEP consequence [INFO/PCR_CSQ] # - Known mutation hotspot [INFO/PCGR_MUTATION_HOTSPOT] - # - ClinVar clinical significant [INFO/PCGR_CLINVAR_CLNSIG] - # - Hits in COSMIC [INFO/PCGR_COSMIC_COUNT] + # - ClinVar clinical significant [INFO/PCGR_CLNSIG] # - Hits in TCGA [INFO/PCGR_TCGA_PANCANCER_COUNT] # - Hits in PCAWG [INFO/PCGR_ICGC_PCAWG_COUNT] - # Set selected data or full input - selection_data = select_variants( - pon_fp, - kwargs['tumor_name'], - kwargs['cancer_genes_fp'], - output_dir, - ) - - if not (pcgr_prep_input_fp := selection_data.get('filtered')): - pcgr_prep_input_fp = selection_data['selected'] # Prepare VCF for PCGR annotation pcgr_prep_fp = pcgr.prepare_vcf_somatic( - pcgr_prep_input_fp, + pon_fp, kwargs['tumor_name'], kwargs['normal_name'], output_dir, ) - # Run PCGR - pcgr_dir = pcgr.run_somatic( - pcgr_prep_fp, - kwargs['pcgr_data_dir'], - output_dir, - threads=kwargs['threads'], - pcgr_conda=kwargs['pcgr_conda'], - pcgrr_conda=kwargs['pcgrr_conda'], - ) + pcgr_output_dir = output_dir / 'pcgr' + total_variants = util.count_vcf_records(pcgr_prep_fp) + print(f"Total number of variants in the input VCF: {total_variants}") + + # Run PCGR in chunks if exceeding the maximum allowed for somatic variants + chunk_size = kwargs.get('pcgr_variant_chunk_size') + if 
chunk_size is not None and chunk_size <= 0: + raise click.BadParameter('must be a positive integer', param_hint='--pcgr_variant_chunk_size') + chunk_size = chunk_size or constants.MAX_SOMATIC_VARIANTS + + if total_variants > chunk_size: + vcf_chunks = pcgr.split_vcf(pcgr_prep_fp, output_dir, max_variants=chunk_size) + pcgr_tsv_fp, pcgr_vcf_fp = pcgr.run_somatic_chunck( + vcf_chunks, + kwargs['pcgr_data_dir'], + kwargs['vep_dir'], + output_dir, + pcgr_output_dir, + kwargs['threads'], + kwargs['pcgr_conda'], + kwargs['pcgrr_conda'], + ) + else: + pcgr_tsv_fp, pcgr_vcf_fp = pcgr.run_somatic( + pcgr_prep_fp, + kwargs['pcgr_data_dir'], + kwargs['vep_dir'], + pcgr_output_dir, + chunk_nbr=None, + threads=kwargs['threads'], + pcgr_conda=kwargs['pcgr_conda'], + pcgrr_conda=kwargs['pcgrr_conda'], + ) # Transfer PCGR annotations to full set of variants pcgr.transfer_annotations_somatic( - selection_data['selected'], + pon_fp, kwargs['tumor_name'], - selection_data.get('filter_name'), - pcgr_dir, + pcgr_vcf_fp, + pcgr_tsv_fp, output_dir, ) - + logger.info("Annotation process completed") def set_filter_pass(input_fp, tumor_name, output_dir): output_fp = output_dir / f'{tumor_name}.set_filter_pass.vcf.gz' @@ -136,7 +152,6 @@ def set_filter_pass(input_fp, tumor_name, output_dir): return output_fp - def general_annotations(input_fp, tumor_name, threads, annotations_dir, output_dir): toml_fp = pathlib.Path(annotations_dir) / 'vcfanno_annotations.toml' @@ -169,7 +184,6 @@ def panel_of_normal_annotations(input_fp, tumor_name, threads, pon_dir, output_d util.execute_command(command) return output_fp - def select_variants(input_fp, tumor_name, cancer_genes_fp, output_dir): # Exclude variants until we hopefully move the needle below the threshold diff --git a/bolt/workflows/smlv_somatic/filter.py b/bolt/workflows/smlv_somatic/filter.py index e46f285..c820385 100644 --- a/bolt/workflows/smlv_somatic/filter.py +++ b/bolt/workflows/smlv_somatic/filter.py @@ -1,12 +1,14 @@ import click import pathlib - +import logging import cyvcf2 from ... 
import util from ...common import constants +from ...logging_config import setup_logging +logger = logging.getLogger(__name__) @click.command(name='filter') @@ -26,6 +28,9 @@ def entry(ctx, **kwargs): output_dir = pathlib.Path(kwargs['output_dir']) output_dir.mkdir(mode=0o755, parents=True, exist_ok=True) + script_name = pathlib.Path(__file__).stem + setup_logging(output_dir, script_name) + # Open input VCF and set required header entries for output in_fh = cyvcf2.VCF(kwargs['vcf_fp']) header_filters = ( @@ -37,7 +42,7 @@ def entry(ctx, **kwargs): constants.VcfFilter.ENCODE, constants.VcfFilter.GNOMAD_COMMON, constants.VcfInfo.SAGE_HOTSPOT_RESCUE, - constants.VcfInfo.PCGR_TIER_RESCUE, + constants.VcfInfo.PCGR_ACTIONABILITY_TIER_RESCUE, constants.VcfInfo.CLINICAL_POTENTIAL_RESCUE, constants.VcfInfo.RESCUED_FILTERS_EXISTING, constants.VcfInfo.RESCUED_FILTERS_PENDING, @@ -126,7 +131,11 @@ def set_filter_data(record, tumor_index): # PON filter ## # NOTE(SW): 'max' is inclusive - keeps variants with 0 to n-1 PON hits; preserved from Umccrise - pon_count = record.INFO.get(constants.VcfInfo.PON_COUNT.value, 0) + pon_count = get_record_value( + record, + constants.VcfInfo.PON_COUNT.value, + default=0 + ) if pon_count >= constants.PON_HIT_THRESHOLD: filters.append(constants.VcfFilter.PON) @@ -141,7 +150,14 @@ def set_filter_data(record, tumor_index): ## # NOTE(SW): rounding is essential here for accurate comparison; cyvcf2 floating-point error # means INFO/gnomAD_AF=0.01 can be represented as 0.009999999776482582 - gnomad_af = round(record.INFO.get(constants.VcfInfo.GNOMAD_AF.value, 0), 3) + gnomad_af = round( + get_record_value( + record, + constants.VcfInfo.GNOMAD_AF.value, + default=0.0 + ), + 3, + ) if gnomad_af >= constants.MAX_GNOMAD_AF: filters.append(constants.VcfFilter.GNOMAD_COMMON) @@ -162,15 +178,15 @@ def set_filter_data(record, tumor_index): ## # PCGR tier rescue ## - pcgr_tier = record.INFO.get(constants.VcfInfo.PCGR_TIER.value) - if pcgr_tier in constants.PCGR_TIERS_RESCUE: - info_rescue.append(constants.VcfInfo.PCGR_TIER_RESCUE) + pcgr_tier = record.INFO.get(constants.VcfInfo.PCGR_ACTIONABILITY_TIER.value) + if pcgr_tier in constants.PCGR_ACTIONABILITY_TIER_RESCUE: + info_rescue.append(constants.VcfInfo.PCGR_ACTIONABILITY_TIER_RESCUE) ## # SAGE hotspot rescue ## # NOTE(SW): effectively reverts any FILTERs that may have been applied above - if record.INFO.get(constants.VcfInfo.SAGE_HOTSPOT.value) is not None: + if get_record_value(record, constants.VcfInfo.SAGE_HOTSPOT.value) is not None: info_rescue.append(constants.VcfInfo.SAGE_HOTSPOT_RESCUE) ## @@ -181,19 +197,26 @@ def set_filter_data(record, tumor_index): # single CLINICAL_POTENTIAL_RESCUE flag # Get ClinVar clinical significance entries - clinvar_clinsig = record.INFO.get(constants.VcfInfo.PCGR_CLINVAR_CLNSIG.value, '') + clinvar_clinsig = get_record_value( + record, + constants.VcfInfo.PCGR_CLINVAR_CLASSIFICATION.value, + default='', + ) clinvar_clinsigs = clinvar_clinsig.split(',') # Hit counts in relevant reference somatic mutation databases - cosmic_count = record.INFO.get(constants.VcfInfo.PCGR_COSMIC_COUNT.value, 0) - tcga_pancancer_count = record.INFO.get(constants.VcfInfo.PCGR_TCGA_PANCANCER_COUNT.value, 0) - icgc_pcawg_count = record.INFO.get(constants.VcfInfo.PCGR_ICGC_PCAWG_COUNT.value, 0) + tcga_pancancer_count = get_record_value( + record, + constants.VcfInfo.PCGR_TCGA_PANCANCER_COUNT.value, + default=0 + ) + hmf_present = get_record_value(record, constants.VcfInfo.HMF_HOTSPOT.value) + pcgr_hotspot_present = 
get_record_value(record, constants.VcfInfo.PCGR_MUTATION_HOTSPOT.value) + if ( - record.INFO.get(constants.VcfInfo.HMF_HOTSPOT.value) is not None or - record.INFO.get(constants.VcfInfo.PCGR_MUTATION_HOTSPOT.value) is not None or + hmf_present is not None or + pcgr_hotspot_present or any(e in clinvar_clinsigs for e in constants.CLINVAR_CLINSIGS_RESCUE) or - cosmic_count >= constants.MIN_COSMIC_COUNT_RESCUE or - tcga_pancancer_count >= constants.MIN_TCGA_PANCANCER_COUNT_RESCUE or - icgc_pcawg_count >= constants.MIN_ICGC_PCAWG_COUNT_RESCUE + tcga_pancancer_count >= constants.MIN_TCGA_PANCANCER_COUNT_RESCUE ): info_rescue.append(constants.VcfInfo.CLINICAL_POTENTIAL_RESCUE) @@ -228,3 +251,22 @@ def set_filter_data(record, tumor_index): filters_existing = [e for e in record.FILTERS if e != 'PASS'] assert all(e not in filters_existing for e in filters_value) record.FILTER = ';'.join([*filters_existing, *filters_value]) + + +def get_record_value(record, key, *, default=None): + ''' + Return a INFO value, replacing '.', '' and missing entries by default value. + + This prevents placeholder values emitted by pcgr from triggering rescues + or filters. For example, an INFO placeholder '.' for + PCGR_TCGA_PANCANCER_COUNT will resolve to the integer 0 when called with + `get_record_value(record, 'PCGR_TCGA_PANCANCER_COUNT', default=0)`. + ''' + value = record.INFO.get(key) + if value is None: + return default + if value=='' or value=='.': + return default + if not isinstance(value, (int, float, str)): + logger.error(f'record ID: {record.ID} INFO value for {key}: {value} ({type(value)}, not int, float, str)') + return value \ No newline at end of file diff --git a/bolt/workflows/smlv_somatic/report.py b/bolt/workflows/smlv_somatic/report.py index bdf908f..65ce535 100644 --- a/bolt/workflows/smlv_somatic/report.py +++ b/bolt/workflows/smlv_somatic/report.py @@ -1,3 +1,4 @@ +import collections import csv import json import pathlib @@ -27,6 +28,7 @@ @click.option('--pcgrr_conda', required=False, type=str) @click.option('--pcgr_data_dir', required=False, type=str) +@click.option('--vep_dir', required=True, type=click.Path(exists=True)) @click.option('--purple_purity_fp', required=True, type=click.Path(exists=True)) @click.option('--cancer_genes_fp', required=True, type=click.Path(exists=True)) @@ -45,6 +47,9 @@ def entry(ctx, **kwargs): output_dir = pathlib.Path(kwargs['output_dir']) output_dir.mkdir(mode=0o755, parents=True, exist_ok=True) + script_name = pathlib.Path(__file__).stem + setup_logging(output_dir, script_name) + # BCFtools stats bcftools_vcf_fp = bcftools_stats_prepare(kwargs['vcf_fp'], kwargs['tumor_name'], output_dir) run_bcftools_stats(bcftools_vcf_fp, kwargs['tumor_name'], output_dir) @@ -105,19 +110,31 @@ def entry(ctx, **kwargs): fh.write('\n') # PCGR report + if variant_counts_process['filter_pass'] <= constants.MAX_SOMATIC_VARIANTS: + pcgr_input_vcf_fp = kwargs['vcf_fp'] + else: + pcgr_input_vcf_fp = select_pcgr_variants( + kwargs['vcf_fp'], + kwargs['cancer_genes_fp'], + kwargs['tumor_name'], + output_dir, + ) + purple_data = parse_purple_purity_file(kwargs['purple_purity_fp']) pcgr_prep_fp = pcgr.prepare_vcf_somatic( - kwargs['vcf_fp'], + pcgr_input_vcf_fp, kwargs['tumor_name'], kwargs['normal_name'], output_dir, ) + pcgr_output_dir = output_dir / 'pcgr' pcgr.run_somatic( pcgr_prep_fp, kwargs['pcgr_data_dir'], - output_dir, + kwargs['vep_dir'], + pcgr_output_dir, threads=kwargs['threads'], pcgr_conda=kwargs['pcgr_conda'], pcgrr_conda=kwargs['pcgrr_conda'], @@ -284,6 +301,75 @@ def 
diff --git a/bolt/workflows/smlv_somatic/report.py b/bolt/workflows/smlv_somatic/report.py
index bdf908f..65ce535 100644
--- a/bolt/workflows/smlv_somatic/report.py
+++ b/bolt/workflows/smlv_somatic/report.py
@@ -1,3 +1,4 @@
+import collections
 import csv
 import json
 import pathlib
@@ -27,6 +28,7 @@
 @click.option('--pcgrr_conda', required=False, type=str)
 @click.option('--pcgr_data_dir', required=False, type=str)
+@click.option('--vep_dir', required=True, type=click.Path(exists=True))
 
 @click.option('--purple_purity_fp', required=True, type=click.Path(exists=True))
 @click.option('--cancer_genes_fp', required=True, type=click.Path(exists=True))
@@ -45,6 +47,9 @@ def entry(ctx, **kwargs):
     output_dir = pathlib.Path(kwargs['output_dir'])
     output_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
 
+    script_name = pathlib.Path(__file__).stem
+    setup_logging(output_dir, script_name)
+
     # BCFtools stats
     bcftools_vcf_fp = bcftools_stats_prepare(kwargs['vcf_fp'], kwargs['tumor_name'], output_dir)
     run_bcftools_stats(bcftools_vcf_fp, kwargs['tumor_name'], output_dir)
@@ -105,19 +110,31 @@ def entry(ctx, **kwargs):
         fh.write('\n')
 
     # PCGR report
+    if variant_counts_process['filter_pass'] <= constants.MAX_SOMATIC_VARIANTS:
+        pcgr_input_vcf_fp = kwargs['vcf_fp']
+    else:
+        pcgr_input_vcf_fp = select_pcgr_variants(
+            kwargs['vcf_fp'],
+            kwargs['cancer_genes_fp'],
+            kwargs['tumor_name'],
+            output_dir,
+        )
+
     purple_data = parse_purple_purity_file(kwargs['purple_purity_fp'])
     pcgr_prep_fp = pcgr.prepare_vcf_somatic(
-        kwargs['vcf_fp'],
+        pcgr_input_vcf_fp,
         kwargs['tumor_name'],
         kwargs['normal_name'],
         output_dir,
     )
+    pcgr_output_dir = output_dir / 'pcgr'
     pcgr.run_somatic(
         pcgr_prep_fp,
         kwargs['pcgr_data_dir'],
-        output_dir,
+        kwargs['vep_dir'],
+        pcgr_output_dir,
         threads=kwargs['threads'],
         pcgr_conda=kwargs['pcgr_conda'],
         pcgrr_conda=kwargs['pcgrr_conda'],
@@ -284,6 +301,75 @@ def count_variant_process(vcf_fp):
     return counts
 
+
+def select_pcgr_variants(vcf_fp, cancer_genes_fp, tumor_name, output_dir):
+    # Annotate variants in UMCCR somatic gene panel
+    fp_annotated_out = output_dir / f'{tumor_name}.umccr_panel_variants_annotated.vcf.gz'
+    util.execute_command(fr'''
+        bcftools annotate \
+            --annotations <(awk 'BEGIN {{ OFS="\t" }} {{ print $1, $2-2000, $3+2000, "1" }}' {cancer_genes_fp}) \
+            --header-line '{util.get_vcf_header_line(constants.VcfInfo.PANEL)}' \
+            --columns CHROM,FROM,TO,{constants.VcfInfo.PANEL.value} \
+            --output {fp_annotated_out} \
+            {vcf_fp}
+    ''')
+
+    # Set filter category for each variant
+    variants_sorted = collections.defaultdict(list)
+    for variant_count, variant in enumerate(cyvcf2.VCF(fp_annotated_out), 1):
+        if any(variant.INFO.get(e) for e in constants.RETAIN_FIELDS_FILTERING):
+            continue
+
+        data = pcgr.get_variant_filter_data(variant)
+        variant_filter = pcgr.determine_filter(data)
+        assert variant_filter
+
+        filter_category = (data['tier'], *variant_filter)
+        variant_repr = pcgr.get_variant_repr(variant)
+        variants_sorted[filter_category].append(variant_repr)
+
+
+    # Determine the set of filter categories to come under the PCGR 500,000 variant threshold
+    filter_sum = 0
+    filter_categories = list()
+    for key in pcgr.get_ordering():
+
+        if (variant_count - filter_sum) <= constants.MAX_SOMATIC_VARIANTS:
+            break
+
+        filter_sum += len(variants_sorted.get(key, []))
+        filter_categories.append(key)
+
+    # Set FILTERS and write out records
+    filter_variants = set()
+    for key in filter_categories:
+        filter_variants.update(variants_sorted[key])
+
+    fh_in = cyvcf2.VCF(vcf_fp)
+    util.add_vcf_header_entry(fh_in, constants.VcfFilter.PCGR_COUNT_LIMIT)
+
+    # NOTE(SW): creating an additional VCF with all records for traceability
+    fp_out = output_dir / f'{tumor_name}.pcgr_hypermutated.pass.vcf.gz'
+    fp_set_out = output_dir / f'{tumor_name}.pcgr_hypermutated.filters_set.vcf.gz'
+
+    fh_out = cyvcf2.Writer(fp_out, fh_in)
+    fh_set_out = cyvcf2.Writer(fp_set_out, fh_in)
+
+    for variant in fh_in:
+        variant_repr = pcgr.get_variant_repr(variant)
+        if variant_repr not in filter_variants:
+            # Write only passing variants
+            fh_out.write_record(variant)
+        else:
+            variant.FILTER = constants.VcfFilter.PCGR_COUNT_LIMIT.value
+        # Write all variants, including those with FILTER set
+        fh_set_out.write_record(variant)
+
+    fh_out.close()
+    fh_set_out.close()
+
+    return fp_out
+
+
 def parse_purple_purity_file(fp):
     with open(fp, 'r') as fh:
         entries = list(csv.DictReader(fh, delimiter='\t'))
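The `select_pcgr_variants` function above leans on helpers from the `pcgr` module (`get_variant_filter_data`, `determine_filter`, `get_variant_repr`, `get_ordering`) that are not part of this diff. The core idea (drop whole filter categories, in a fixed least-to-most relevant order, until the remaining record count fits under the PCGR input limit) can be sketched in isolation as follows; the category tuples and the toy threshold of 10 are illustrative only:

```python
import collections

# Toy threshold standing in for constants.MAX_SOMATIC_VARIANTS
MAX_VARIANTS = 10


def categories_to_filter(variants_sorted, total_count, ordering):
    '''Return the categories whose variants must be FILTERed so that the
    remaining record count fits under the PCGR input limit.'''
    filter_sum = 0
    selected = []
    for category in ordering:
        if total_count - filter_sum <= MAX_VARIANTS:
            break
        filter_sum += len(variants_sorted.get(category, []))
        selected.append(category)
    return selected


# Hypothetical (tier, consequence, region) categories; the ordering runs from
# least to most clinically relevant, standing in for pcgr.get_ordering()
ordering = [
    ('TIER_4', 'intergenic', 'difficult'),
    ('TIER_1', 'impacts_other', 'giab_conf'),
]

variants_sorted = collections.defaultdict(list)
variants_sorted[ordering[0]] = [f'chr1:{i}' for i in range(12)]      # low priority
variants_sorted[ordering[1]] = ['chr7:140753336', 'chr12:25245350']  # high priority

total = sum(len(v) for v in variants_sorted.values())  # 14 records, 4 over the toy limit
assert categories_to_filter(variants_sorted, total, ordering) == [ordering[0]]
```

Note that variants carrying any of the `RETAIN_FIELDS_FILTERING` annotations never enter `variants_sorted` (the `continue` above), so panel and hotspot variants cannot be removed regardless of category.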
diff --git a/bolt/workflows/smlv_somatic/rescue.py b/bolt/workflows/smlv_somatic/rescue.py
index c92df94..bfd89b9 100644
--- a/bolt/workflows/smlv_somatic/rescue.py
+++ b/bolt/workflows/smlv_somatic/rescue.py
@@ -7,10 +7,14 @@
 import click
 import cyvcf2
+import logging
 
 from ... import util
 from ...common import constants
+from ...logging_config import setup_logging
+
+logger = logging.getLogger(__name__)
 
 
 @click.command(name='rescue')
@@ -37,6 +41,9 @@ def entry(ctx, **kwargs):
     output_dir = pathlib.Path(kwargs['output_dir'])
     output_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
 
+    script_name = pathlib.Path(__file__).stem
+    setup_logging(output_dir, script_name)
+
     # Select PASS SAGE variants in hotspots and then split into existing and novel calls
     sage_pass_vcf_fp = select_sage_pass_hotspot(
         kwargs['sage_vcf_fp'],
diff --git a/bolt/workflows/sv_somatic/annotate.py b/bolt/workflows/sv_somatic/annotate.py
index 2988e53..8577922 100644
--- a/bolt/workflows/sv_somatic/annotate.py
+++ b/bolt/workflows/sv_somatic/annotate.py
@@ -5,10 +5,13 @@
 import click
 import cyvcf2
 import pysam
+import logging
 
 from ... import util
 
+logger = logging.getLogger(__name__)
 
 
 @click.command(name='annotate')
 @click.pass_context
diff --git a/conda/env/bolt_env.yml b/conda/env/bolt_env.yml
index e27bd24..71e90c9 100644
--- a/conda/env/bolt_env.yml
+++ b/conda/env/bolt_env.yml
@@ -16,3 +16,4 @@ dependencies:
   - python >=3.10
   - pyyaml
   - vcfanno ==0.3.5
+  - ncurses>=6.3
diff --git a/docker/Dockerfile.gpgr b/docker/Dockerfile.gpgr
index b8504e9..6b76ecb 100644
--- a/docker/Dockerfile.gpgr
+++ b/docker/Dockerfile.gpgr
@@ -18,7 +18,7 @@ RUN \
 
 RUN \
     conda install --prefix /env/ \
-        'r-gpgr ==2.1.3' \
+        'r-gpgr ==2.2.11' \
        'r-sigrap ==0.1.1' \
        'bioconductor-bsgenome.hsapiens.ucsc.hg38 ==1.4.5' \
        'bioconductor-txdb.hsapiens.ucsc.hg38.knowngene ==3.16.0' \
diff --git a/docker/Dockerfile.pcgr b/docker/Dockerfile.pcgr
index 4ea4932..ca984e8 100644
--- a/docker/Dockerfile.pcgr
+++ b/docker/Dockerfile.pcgr
@@ -15,13 +15,13 @@ RUN \
     conda create \
         --solver libmamba \
         --name pcgr \
-        --file https://raw.githubusercontent.com/sigven/pcgr/v1.4.1/conda/env/lock/pcgr-linux-64.lock
+        --file https://raw.githubusercontent.com/sigven/pcgr/refs/tags/v2.2.1/conda/env/lock/pcgr-linux-64.lock
 
 RUN \
     conda create \
         --solver libmamba \
         --name pcgrr \
-        --file https://raw.githubusercontent.com/sigven/pcgr/v1.4.1/conda/env/lock/pcgrr-linux-64.lock
+        --file https://raw.githubusercontent.com/sigven/pcgr/refs/tags/v2.2.1/conda/env/lock/pcgrr-linux-64.lock
 
 COPY ./conda/env/bolt_env.yml /tmp/
 RUN \
diff --git a/pyproject.toml b/pyproject.toml
index ae8b9bb..e7325a3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ include = ["bolt*"]
 
 [project]
 name = "bolt"
-version = "0.2.14"
+version = "0.2.17"
 authors = [
     {name = "Stephen Watts", email = "stephen.watts@umccr.org"},
 ]
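Several workflows above now call `setup_logging(output_dir, script_name)` from a new `bolt.logging_config` module that is not included in this diff. A minimal sketch of a compatible helper, assuming it writes a per-command log file into the output directory alongside console output (the file name and message format here are assumptions):

```python
import logging
import pathlib


def setup_logging(output_dir, script_name, *, level=logging.INFO):
    # Hypothetical helper: log to <output_dir>/<script_name>.log and to the console
    log_fp = pathlib.Path(output_dir) / f'{script_name}.log'
    logging.basicConfig(
        level=level,
        format='%(asctime)s %(name)s %(levelname)s: %(message)s',
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_fp),
        ],
        force=True,  # reconfigure cleanly if logging was already set up
    )
```

Module-level loggers obtained with `logging.getLogger(__name__)`, as added in filter.py, rescue.py, and annotate.py, would then propagate their records to these root handlers.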
diff --git a/tests/test_smlv_somatic_filter.py b/tests/test_smlv_somatic_filter.py
index 41a0e96..6455162 100644
--- a/tests/test_smlv_somatic_filter.py
+++ b/tests/test_smlv_somatic_filter.py
@@ -160,15 +160,15 @@ def test_common_population_filter(self):
 
     def test_pcgr_tier_rescue(self):
         pcgr_tiers = [
-            'TIER_1',
-            'TIER_2',
+            '1',
+            '2',
         ]
 
-        rescue_tag_str = bolt_constants.VcfInfo.PCGR_TIER_RESCUE.value
+        rescue_tag_str = bolt_constants.VcfInfo.PCGR_ACTIONABILITY_TIER_RESCUE.value
         for pcgr_tier in pcgr_tiers:
             record = get_record(
                 **self.records['filter_min_af9.9'],
-                info_data={'PCGR_TIER': pcgr_tier},
+                info_data={'PCGR_ACTIONABILITY_TIER': pcgr_tier},
             )
             smlv_somatic_filter.set_filter_data(record, 0)
             assert not record.FILTER
@@ -189,9 +187,7 @@ def test_clinical_potential_rescue_general(self):
         info_data_sets = [
             {'HMF_HOTSPOT': ''},
             {'PCGR_MUTATION_HOTSPOT': ''},
-            {'PCGR_COSMIC_COUNT': 11},
             {'PCGR_TCGA_PANCANCER_COUNT': 6},
-            {'PCGR_ICGC_PCAWG_COUNT': 6},
         ]
 
         rescue_tag_str = bolt_constants.VcfInfo.CLINICAL_POTENTIAL_RESCUE.value
@@ -217,7 +215,7 @@ def test_clinical_potential_rescue_clinvar_clinsig(self):
         for clinsig in clinsigs:
             record = get_record(
                 **self.records['filter_min_af9.9'],
-                info_data={'PCGR_CLINVAR_CLNSIG': clinsig},
+                info_data={'PCGR_CLINVAR_CLASSIFICATION': clinsig},
             )
             smlv_somatic_filter.set_filter_data(record, 0)
             assert not record.FILTER
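The updated tests cover the renamed PCGR annotations; the placeholder handling introduced by `get_record_value` is a natural companion case. A hypothetical addition in the same style, reusing the `get_record` helper and the `filter_min_af9.9` fixture referenced above (not part of this diff):

```python
    def test_clinical_potential_rescue_placeholder_ignored(self):
        # A '.' placeholder emitted by PCGR should resolve to 0 and therefore
        # neither rescue the variant nor raise during the numeric comparison
        record = get_record(
            **self.records['filter_min_af9.9'],
            info_data={'PCGR_TCGA_PANCANCER_COUNT': '.'},
        )
        smlv_somatic_filter.set_filter_data(record, 0)

        rescue_tag_str = bolt_constants.VcfInfo.CLINICAL_POTENTIAL_RESCUE.value
        # No rescue applied: the AF-based filter from the fixture is expected to remain
        assert record.FILTER
        assert record.INFO.get(rescue_tag_str) is None
```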