Skip to content

Commit

Permalink
Add initial redesign of liftings maps
Browse files Browse the repository at this point in the history
  • Loading branch information
luisfpereira committed Nov 25, 2024
1 parent 6b5c349 commit 0863fc4
Show file tree
Hide file tree
Showing 8 changed files with 550 additions and 109 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -3,21 +3,24 @@
import torch

from topobenchmarkx.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting,
SimplicialCliqueLifting
)

from topobenchmarkx.transforms.converters import Data2NxGraph, Complex2Dict
from topobenchmarkx.transforms.liftings.base import LiftingTransform

class TestSimplicialCliqueLifting:
"""Test the SimplicialCliqueLifting class."""

def setup_method(self):
# NOTE(review): this span is a rendered diff — it shows the OLD body
# (direct SimplicialCliqueLifting instances) followed by the NEW body
# (converter + LiftingTransform composition). Only the second half
# exists in the committed file; verify against the repository.
# Initialise the SimplicialCliqueLifting class
self.lifting_signed = SimplicialCliqueLifting(
complex_dim=3, signed=True
)
self.lifting_unsigned = SimplicialCliqueLifting(
complex_dim=3, signed=False
)
# New design: lifting is a plain map; conversion to/from framework
# data structures is delegated to converter objects.
data2graph = Data2NxGraph()
simplicial2dict_signed = Complex2Dict(signed=True)
simplicial2dict_unsigned = Complex2Dict(signed=False)

lifting_map = SimplicialCliqueLifting(complex_dim=3)

# LiftingTransform(input converter, output converter, lifting map);
# signed/unsigned variants differ only in the output converter.
self.lifting_signed = LiftingTransform(data2graph, simplicial2dict_signed, lifting_map)
self.lifting_unsigned = LiftingTransform(data2graph, simplicial2dict_unsigned, lifting_map)

def test_lift_topology(self, simple_graph_1):
"""Test the lift_topology method."""
Expand Down
85 changes: 85 additions & 0 deletions topobenchmarkx/complex.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
import torch


class PlainComplex:
    """Plain container for the structure matrices of a complex.

    Holds one structure matrix per rank (incidence, Laplacians,
    (co)adjacency) plus optional per-rank features. Matrices are stored
    as given; only the feature shapes are validated.

    Parameters
    ----------
    incidence : list of torch.Tensor
        Incidence matrices, one per rank. The number of rank-``r`` cells
        is read from ``incidence[r].shape[-1]``.
    down_laplacian : list of torch.Tensor
        Down-Laplacian matrices, one per rank.
    up_laplacian : list of torch.Tensor
        Up-Laplacian matrices, one per rank.
    adjacency : list of torch.Tensor
        Adjacency matrices, one per rank.
    coadjacency : list of torch.Tensor
        Coadjacency matrices, one per rank.
    hodge_laplacian : list of torch.Tensor
        Hodge-Laplacian matrices, one per rank.
    features : list or None
        Per-rank feature tensors (``None`` entries allowed). If ``None``,
        no rank carries features.

    Raises
    ------
    ValueError
        If a feature tensor's first dimension does not match the number
        of cells of its rank.
    """

    def __init__(
        self,
        incidence,
        down_laplacian,
        up_laplacian,
        adjacency,
        coadjacency,
        hodge_laplacian,
        features=None,
    ):
        # TODO: allow None with nice error message if callable?

        # TODO: make this private? do not allow for changes in these values?
        self.incidence = incidence
        self.down_laplacian = down_laplacian
        self.up_laplacian = up_laplacian
        self.adjacency = adjacency
        self.coadjacency = coadjacency
        self.hodge_laplacian = hodge_laplacian

        if features is None:
            # One (absent) feature slot per rank.
            features = [None for _ in range(len(self.incidence))]
        else:
            for rank, dim in enumerate(self.shape):
                if (
                    features[rank] is not None
                    and features[rank].shape[0] != dim
                ):
                    raise ValueError(
                        f"Features for rank {rank} have wrong shape: "
                        f"expected {dim} rows, got "
                        f"{features[rank].shape[0]}."
                    )

        self.features = features

    @property
    def shape(self):
        """Number of cells per rank of the complex.

        Returns
        -------
        list[int]
        """
        return [incidence.shape[-1] for incidence in self.incidence]

    @property
    def max_rank(self):
        """Maximum rank of the complex.

        Returns
        -------
        int
        """
        # NOTE(review): this is the number of stored ranks
        # (len(incidence)), not the largest rank index — confirm
        # intended convention with callers.
        return len(self.incidence)

    def update_features(self, rank, values):
        """Update features.

        Parameters
        ----------
        rank : int
            Rank of simplices the features belong to.
        values : array-like
            New features for the rank-simplices.
        """
        self.features[rank] = values

    def reset_features(self):
        """Reset features."""
        self.features = [None for _ in self.features]

    def propagate_values(self, rank, values):
        """Propagate features from a rank to an upper one.

        Parameters
        ----------
        rank : int
            Rank of the simplices the values belong to.
        values : array-like
            Features for the rank-simplices.

        Returns
        -------
        torch.Tensor
            Values aggregated onto the (rank+1)-simplices via the
            unsigned incidence matrix.
        """
        # TODO: can be made much better
        # |B_{rank+1}|^T aggregates each (rank+1)-cell's boundary values.
        return torch.matmul(torch.abs(self.incidence[rank + 1].t()), values)
Loading

0 comments on commit 0863fc4

Please sign in to comment.