Merge branch 'dev' of https://github.com/opentargets/gentropy into il…-shapley-predictions
ireneisdoomed committed Feb 13, 2025
2 parents 62f45b4 + 30a6046 commit c388ff5
Showing 4 changed files with 304 additions and 37 deletions.
10 changes: 9 additions & 1 deletion src/gentropy/dataset/study_locus.py
@@ -433,7 +433,8 @@ def _qc_subsignificant_associations(
def qc_abnormal_pips(
self: StudyLocus,
sum_pips_lower_threshold: float = 0.99,
- sum_pips_upper_threshold: float = 1.0001,  # Set slightly above 1 to account for floating point errors
+ # Set slightly above 1 to account for floating point errors
+ sum_pips_upper_threshold: float = 1.0001,
) -> StudyLocus:
"""Filter study-locus by sum of posterior inclusion probabilities to ensure that the sum of PIPs is within a given range.
@@ -691,6 +692,7 @@ def flag_trans_qtls(
"""Flagging transQTL credible sets based on genomic location of the measured gene.
Process:
0. Make sure that the `isTransQtl` column does not exist (remove if exists)
1. Enrich study-locus dataset with geneId based on study metadata. (only QTL studies are considered)
2. Enrich with transcription start site and chromosome of the studied gegne.
3. Flagging any tagging variant of QTL credible sets, if chromosome is different from the gene or distance is above the threshold.
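Step 3 reduces to a per-variant boolean. A minimal sketch of that rule, with hypothetical column names (`chromosome` and `position` for the tag variant, `geneChromosome` and `tss` from the study metadata) and an illustrative 5 Mb cis window:

import pyspark.sql.functions as f

# All names below are illustrative, not the dataset's actual schema.
is_trans_qtl = (f.col("chromosome") != f.col("geneChromosome")) | (
    f.abs(f.col("position") - f.col("tss")) > f.lit(5_000_000)
)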
@@ -709,6 +711,12 @@
if "geneId" not in study_index.df.columns:
return self

# We have to remove the `isTransQtl` column to ensure it is not duplicated.
# The duplication can happen when the StudyLocus is read from parquet with a
# predefined schema that already contains the `isTransQtl` column.
if "isTransQtl" in self.df.columns:
self.df = self.df.drop("isTransQtl")

# Process study index:
processed_studies = (
study_index.df
58 changes: 56 additions & 2 deletions src/gentropy/method/colocalisation.py
@@ -208,11 +208,15 @@ class Coloc(ColocalisationMethodInterface):
Attributes:
PSEUDOCOUNT (float): Pseudocount to avoid log(0). Defaults to 1e-10.
OVERLAP_SIZE_CUTOFF (int): Minimum number of overlapping variants before filtering. Defaults to 5.
POSTERIOR_CUTOFF (float): Minimum posterior probability cutoff for small overlaps. Defaults to 0.5.
"""

METHOD_NAME: str = "COLOC"
METHOD_METRIC: str = "h4"
PSEUDOCOUNT: float = 1e-10
OVERLAP_SIZE_CUTOFF: int = 5
POSTERIOR_CUTOFF: float = 0.5

@staticmethod
def _get_posteriors(all_bfs: NDArray[np.float64]) -> DenseVector:
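The body of `_get_posteriors` is collapsed in this view. A conventional COLOC-style normalisation, offered here as an assumption rather than the verbatim implementation, turns log Bayes factors into posteriors via a numerically stable log-sum-exp:

import numpy as np
from numpy.typing import NDArray
from pyspark.ml.linalg import DenseVector, Vectors
from scipy.special import logsumexp

def _get_posteriors(all_bfs: NDArray[np.float64]) -> DenseVector:
    """Normalise log Bayes factors into posterior probabilities."""
    return Vectors.dense(np.exp(all_bfs - logsumexp(all_bfs)))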
@@ -277,7 +281,15 @@ def colocalise(
)
.select("*", "statistics.*")
# Before summing log_BF columns nulls need to be filled with 0:
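# (A null means the variant is absent on that side: log BF 0 is BF 1, adding
# no evidence, and a posterior probability of 0 cannot pass POSTERIOR_CUTOFF.)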
- .fillna(0, subset=["left_logBF", "right_logBF"])
+ .fillna(
+     0,
+     subset=[
+         "left_logBF",
+         "right_logBF",
+         "left_posteriorProbability",
+         "right_posteriorProbability",
+     ],
+ )
# Sum of log_BFs for each pair of signals
.withColumn(
"sum_log_bf",
@@ -305,9 +317,18 @@
fml.array_to_vector(f.collect_list(f.col("right_logBF"))).alias(
"right_logBF"
),
fml.array_to_vector(
f.collect_list(f.col("left_posteriorProbability"))
).alias("left_posteriorProbability"),
fml.array_to_vector(
f.collect_list(f.col("right_posteriorProbability"))
).alias("right_posteriorProbability"),
fml.array_to_vector(f.collect_list(f.col("sum_log_bf"))).alias(
"sum_log_bf"
),
f.collect_list(f.col("tagVariantSource")).alias(
"tagVariantSourceList"
),
)
.withColumn("logsum1", logsum(f.col("left_logBF")))
.withColumn("logsum2", logsum(f.col("right_logBF")))
@@ -327,10 +348,39 @@
# h3
.withColumn("sumlogsum", f.col("logsum1") + f.col("logsum2"))
.withColumn("max", f.greatest("sumlogsum", "logsum12"))
.withColumn(
"anySnpBothSidesHigh",
f.aggregate(
f.transform(
f.arrays_zip(
fml.vector_to_array(f.col("left_posteriorProbability")),
fml.vector_to_array(
f.col("right_posteriorProbability")
),
f.col("tagVariantSourceList"),
),
# row["0"] = left PP, row["1"] = right PP, row["tagVariantSourceList"]
lambda row: f.when(
(row["tagVariantSourceList"] == "both")
& (row["0"] > Coloc.POSTERIOR_CUTOFF)
& (row["1"] > Coloc.POSTERIOR_CUTOFF),
1.0,
).otherwise(0.0),
),
f.lit(0.0),
lambda acc, x: acc + x,
)
> 0,  # True if at least one variant passes both cutoffs
)
.filter(
(f.col("numberColocalisingVariants") > Coloc.OVERLAP_SIZE_CUTOFF)
| (f.col("anySnpBothSidesHigh"))
)
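# Net effect of the filter above (toy numbers, invented for illustration):
# an overlap of 3 variants (< OVERLAP_SIZE_CUTOFF = 5) is kept only when a
# variant tagged "both" has PP > POSTERIOR_CUTOFF = 0.5 on each side,
# e.g. left PP 0.60 and right PP 0.70; larger overlaps always pass.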
.withColumn(
"logdiff",
f.when(
f.col("sumlogsum") == f.col("logsum12"), Coloc.PSEUDOCOUNT
(f.col("sumlogsum") == f.col("logsum12")),
Coloc.PSEUDOCOUNT,
).otherwise(
f.col("max")
+ f.log(
@@ -382,6 +432,10 @@ def colocalise(
"lH2bf",
"lH3bf",
"lH4bf",
"left_posteriorProbability",
"right_posteriorProbability",
"tagVariantSourceList",
"anySnpBothSidesHigh",
)
.withColumn("colocalisationMethod", f.lit(cls.METHOD_NAME))
.join(
36 changes: 31 additions & 5 deletions tests/gentropy/dataset/test_study_locus.py
@@ -2,6 +2,7 @@

from __future__ import annotations

from pathlib import Path
from typing import Any

import pyspark.sql.functions as f
@@ -18,6 +19,8 @@
StructType,
)

from gentropy.common.schemas import SchemaValidationError
from gentropy.common.session import Session
from gentropy.dataset.colocalisation import Colocalisation
from gentropy.dataset.l2g_feature_matrix import L2GFeatureMatrix
from gentropy.dataset.ld_index import LDIndex
@@ -1209,7 +1212,6 @@ class TestTransQtlFlagging:
]

STUDY_LOCUS_COLUMNS = ["studyLocusId", "variantId", "studyId"]

STUDY_DATA = [
("s1", "p1", "qtl", "g1"),
("s2", "p2", "gwas", None),
@@ -1221,21 +1223,21 @@
GENE_COLUMNS = ["id", "strand", "start", "end", "chromosome", "tss"]

@pytest.fixture(autouse=True)
- def _setup(self: TestTransQtlFlagging, spark: SparkSession) -> None:
+ def _setup(self: TestTransQtlFlagging, session: Session) -> None:
"""Setup study locus for testing."""
self.study_locus = StudyLocus(
_df=(
- spark.createDataFrame(
+ session.spark.createDataFrame(
self.STUDY_LOCUS_DATA, self.STUDY_LOCUS_COLUMNS
).withColumn("locus", f.array(f.struct("variantId")))
)
)
self.study_index = StudyIndex(
- _df=spark.createDataFrame(self.STUDY_DATA, self.STUDY_COLUMNS)
+ _df=session.spark.createDataFrame(self.STUDY_DATA, self.STUDY_COLUMNS)
)
self.target_index = TargetIndex(
_df=(
- spark.createDataFrame(self.GENE_DATA, self.GENE_COLUMNS).select(
+ session.spark.createDataFrame(self.GENE_DATA, self.GENE_COLUMNS).select(
f.struct(
f.col("strand").cast(IntegerType()).alias("strand"),
"start",
@@ -1283,3 +1285,27 @@ def test_correctness_found_trans(self: TestTransQtlFlagging) -> None:
assert (
self.qtl_flagged.df.filter(f.col("isTransQtl")).count() == 2
), "Expected number of rows differ from observed."

def test_add_flag_if_column_is_present(
self: TestTransQtlFlagging, tmp_path: Path, session: Session
) -> None:
"""Test adding flag if the `isTransQtl` column is already present.
When reading the dataset, the reader will add the `isTransQtl` column to
the schema, which can cause column duplication captured only by Dataset schema validation.
This test ensures that the column is dropped before the `flag_trans_qtls` is run.
"""
dataset_path = str(tmp_path / "study_locus")
self.study_locus.df.write.parquet(dataset_path)
schema_validated_study_locus = StudyLocus.from_parquet(session, dataset_path)
assert (
"isTransQtl" in schema_validated_study_locus.df.columns
), "`isTransQtl` column is missing after reading the dataset."
# Rerun the flag addition and check if any error is raised by the schema validation
try:
schema_validated_study_locus.flag_trans_qtls(
self.study_index, self.target_index, self.THRESHOLD
)
except SchemaValidationError:
pytest.fail("Failed to validate the schema when adding isTransQtl flag")