Merge pull request #235 from pyt-team/jude/use_new_sparse_method
Replace old sparse -> dense -> sparse with new from_sparse method
jkhouja authored Oct 9, 2023
2 parents 1178970 + 5118ee2 commit 62ee00c
Showing 26 changed files with 128 additions and 115 deletions.
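In every file below, the same mechanical substitution is applied: the SciPy-sparse -> dense NumPy -> torch-sparse round trip is replaced by a direct call to topomodelx.utils.sparse.from_sparse. In the diffs, lines removed by the commit are prefixed with - and added lines with +. As a minimal sketch of the two patterns (assuming torch, SciPy, and topomodelx are installed; the small identity matrix is only a stand-in for a real incidence or Laplacian matrix):

import numpy as np
import torch
from scipy.sparse import csc_matrix

from topomodelx.utils.sparse import from_sparse

# Illustrative stand-in for e.g. simplicial_complex.incidence_matrix(1).
mat = csc_matrix(np.eye(3, dtype=np.float32))

# Old pattern: materializes the full dense matrix as an intermediate step.
old = torch.from_numpy(mat.todense()).to_sparse()

# New pattern: converts the SciPy sparse matrix to a torch sparse tensor directly.
new = from_sparse(mat)

assert torch.equal(old.to_dense(), new.to_dense())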
test/nn/simplicial/test_san.py (13 changes: 4 additions & 9 deletions)
@@ -6,6 +6,7 @@
from toponetx.classes import SimplicialComplex

from topomodelx.nn.simplicial.san import SAN
+from topomodelx.utils.sparse import from_sparse


class TestSAN:
@@ -32,9 +33,7 @@ def test_forward(self):
simplicial_complex.add_simplex(simplex)
x_1 = torch.randn(35, 2)
x_0 = torch.randn(15, 2)
-incidence_0_1 = torch.from_numpy(
-simplicial_complex.incidence_matrix(1).todense()
-).to_sparse()
+incidence_0_1 = from_sparse(simplicial_complex.incidence_matrix(1))
x = x_1 + torch.sparse.mm(incidence_0_1.T, x_0)
in_channels = x.shape[-1]
hidden_channels = 16
@@ -45,12 +44,8 @@
out_channels=out_channels,
n_layers=1,
)
-laplacian_down_1 = torch.from_numpy(
-simplicial_complex.down_laplacian_matrix(rank=1).todense()
-).to_sparse()
-laplacian_up_1 = torch.from_numpy(
-simplicial_complex.up_laplacian_matrix(rank=1).todense()
-).to_sparse()
+laplacian_down_1 = from_sparse(simplicial_complex.down_laplacian_matrix(rank=1))
+laplacian_up_1 = from_sparse(simplicial_complex.up_laplacian_matrix(rank=1))

assert torch.any(
torch.isclose(
test/nn/simplicial/test_sca_cmps.py (9 changes: 5 additions & 4 deletions)
@@ -6,6 +6,7 @@
from toponetx.classes import SimplicialComplex

from topomodelx.nn.simplicial.sca_cmps import SCACMPS
+from topomodelx.utils.sparse import from_sparse


class TestSCA:
@@ -38,10 +39,10 @@ def test_forward(self):
down_lap2 = simplicial_complex.down_laplacian_matrix(rank=2)
incidence_1t = simplicial_complex.incidence_matrix(rank=1).T
incidence_2t = simplicial_complex.incidence_matrix(rank=2).T
-down_lap1 = torch.from_numpy(down_lap1.todense()).to_sparse()
-down_lap2 = torch.from_numpy(down_lap2.todense()).to_sparse()
-incidence_1t = torch.from_numpy(incidence_1t.todense()).to_sparse()
-incidence_2t = torch.from_numpy(incidence_2t.todense()).to_sparse()
+down_lap1 = from_sparse(down_lap1)
+down_lap2 = from_sparse(down_lap2)
+incidence_1t = from_sparse(incidence_1t)
+incidence_2t = from_sparse(incidence_2t)
channels_list = [x_0.shape[-1], x_1.shape[-1], x_2.shape[-1]]
complex_dim = 3
model = SCACMPS(
test/nn/simplicial/test_sccn.py (3 changes: 2 additions & 1 deletion)
@@ -6,6 +6,7 @@
from toponetx.classes import SimplicialComplex

from topomodelx.nn.simplicial.sccn import SCCN
+from topomodelx.utils.sparse import from_sparse


class TestSCCN:
@@ -38,7 +39,7 @@ def test_forward(self):
max_rank = 2

def sparse_to_torch(X):
-return torch.from_numpy(X.todense()).to_sparse()
+return from_sparse(X)

incidences = {
f"rank_{r}": sparse_to_torch(simplicial_complex.incidence_matrix(rank=r))
test/nn/simplicial/test_sccnn.py (13 changes: 7 additions & 6 deletions)
@@ -6,6 +6,7 @@
from toponetx.classes import SimplicialComplex

from topomodelx.nn.simplicial.sccnn import SCCNN
+from topomodelx.utils.sparse import from_sparse


class TestSCCNN:
@@ -46,12 +47,12 @@ def test_forward(self):
laplacian_up_1 = simplicial_complex.up_laplacian_matrix(rank=1)
laplacian_2 = simplicial_complex.hodge_laplacian_matrix(rank=2, weight=True)

-incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()
-incidence_2 = torch.from_numpy(incidence_2.todense()).to_sparse()
-laplacian_0 = torch.from_numpy(laplacian_0.todense()).to_sparse()
-laplacian_down_1 = torch.from_numpy(laplacian_down_1.todense()).to_sparse()
-laplacian_up_1 = torch.from_numpy(laplacian_up_1.todense()).to_sparse()
-laplacian_2 = torch.from_numpy(laplacian_2.todense()).to_sparse()
+incidence_1 = from_sparse(incidence_1)
+incidence_2 = from_sparse(incidence_2)
+laplacian_0 = from_sparse(laplacian_0)
+laplacian_down_1 = from_sparse(laplacian_down_1)
+laplacian_up_1 = from_sparse(laplacian_up_1)
+laplacian_2 = from_sparse(laplacian_2)
conv_order = 2
intermediate_channels_all = (16, 16, 16)
out_channels_all = intermediate_channels_all
test/nn/simplicial/test_scn2.py (7 changes: 4 additions & 3 deletions)
@@ -6,6 +6,7 @@
from toponetx.classes import SimplicialComplex

from topomodelx.nn.simplicial.scn2 import SCN2
+from topomodelx.utils.sparse import from_sparse


class TestSCN2:
@@ -38,9 +39,9 @@ def test_forward(self):
laplacian_1 = simplicial_complex.normalized_laplacian_matrix(rank=1)
laplacian_2 = simplicial_complex.normalized_laplacian_matrix(rank=2)

-laplacian_0 = torch.from_numpy(laplacian_0.todense()).to_sparse()
-laplacian_1 = torch.from_numpy(laplacian_1.todense()).to_sparse()
-laplacian_2 = torch.from_numpy(laplacian_2.todense()).to_sparse()
+laplacian_0 = from_sparse(laplacian_0)
+laplacian_1 = from_sparse(laplacian_1)
+laplacian_2 = from_sparse(laplacian_2)
in_channels_0 = x_0.shape[1]
in_channels_1 = x_1.shape[1]
in_channels_2 = x_2.shape[1]
test/nn/simplicial/test_scnn.py (13 changes: 7 additions & 6 deletions)
@@ -7,6 +7,7 @@
from toponetx.classes import SimplicialComplex

from topomodelx.nn.simplicial.scnn import SCNN
+from topomodelx.utils.sparse import from_sparse


class TestSCNN:
@@ -41,12 +42,12 @@ def test_forward(self):
laplacian_up_1 = simplicial_complex.up_laplacian_matrix(rank=1)
laplacian_2 = simplicial_complex.hodge_laplacian_matrix(rank=2, weight=True)

-incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()
-incidence_2 = torch.from_numpy(incidence_2.todense()).to_sparse()
-laplacian_0 = torch.from_numpy(laplacian_0.todense()).to_sparse()
-laplacian_down_1 = torch.from_numpy(laplacian_down_1.todense()).to_sparse()
-laplacian_up_1 = torch.from_numpy(laplacian_up_1.todense()).to_sparse()
-laplacian_2 = torch.from_numpy(laplacian_2.todense()).to_sparse()
+incidence_1 = from_sparse(incidence_1)
+incidence_2 = from_sparse(incidence_2)
+laplacian_0 = from_sparse(laplacian_0)
+laplacian_down_1 = from_sparse(laplacian_down_1)
+laplacian_up_1 = from_sparse(laplacian_up_1)
+laplacian_2 = from_sparse(laplacian_2)
conv_order_down = 2
conv_order_up = 2
intermediate_channels = 4
topomodelx/utils/sparse.py (2 changes: 1 addition & 1 deletion)
@@ -24,4 +24,4 @@ def from_sparse(data: _csc.csc_matrix) -> torch.Tensor:
values = torch.FloatTensor(coo.data)
indices = torch.LongTensor(np.vstack((coo.row, coo.col)))

-return torch.sparse_coo_tensor(indices, values, coo.shape)
+return torch.sparse_coo_tensor(indices, values, coo.shape).coalesce()
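Beyond dropping the dense intermediate, the utility now returns a coalesced tensor, so its indices are sorted and free of duplicates. A short, hedged usage sketch (the 2x2 Laplacian is illustrative; in the tests and tutorials these matrices come from toponetx):

import numpy as np
import torch
from scipy.sparse import csc_matrix

from topomodelx.utils.sparse import from_sparse

# Illustrative 2x2 down-Laplacian in CSC form.
laplacian = csc_matrix(np.array([[2.0, -1.0], [-1.0, 2.0]], dtype=np.float32))
lap_t = from_sparse(laplacian)

# Coalesced COO tensors expose sorted, duplicate-free indices, so .indices()
# and .values() can be read directly and sparse matmul behaves predictably.
assert lap_t.is_coalesced()
x = torch.randn(2, 4)
out = torch.sparse.mm(lap_t, x)

Calling .indices() on an uncoalesced sparse COO tensor raises an error in torch, so coalescing inside the utility spares callers from doing it themselves.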
tutorials/cell/can_train.ipynb (9 changes: 4 additions & 5 deletions)
@@ -124,7 +124,8 @@
"import torch.nn.functional as F\n",
"\n",
"\n",
"from topomodelx.nn.cell.can import CAN"
"from topomodelx.nn.cell.can import CAN\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -253,14 +254,12 @@
" adjacency_0_list.append(adjacency_0)\n",
"\n",
" lower_neighborhood_t = cell_complex.down_laplacian_matrix(rank=1)\n",
" lower_neighborhood_t = torch.from_numpy(lower_neighborhood_t.todense()).to_sparse()\n",
" lower_neighborhood_t = from_sparse(lower_neighborhood_t)\n",
" lower_neighborhood_list.append(lower_neighborhood_t)\n",
"\n",
" try:\n",
" upper_neighborhood_t = cell_complex.up_laplacian_matrix(rank=1)\n",
" upper_neighborhood_t = torch.from_numpy(\n",
" upper_neighborhood_t.todense()\n",
" ).to_sparse()\n",
" upper_neighborhood_t = from_sparse(upper_neighborhood_t)\n",
" except:\n",
" upper_neighborhood_t = np.zeros(\n",
" (lower_neighborhood_t.shape[0], lower_neighborhood_t.shape[0])\n",
tutorials/cell/ccxn_train.ipynb (7 changes: 4 additions & 3 deletions)
@@ -65,7 +65,8 @@
"import toponetx.datasets as datasets\n",
"\n",
"\n",
"from topomodelx.nn.cell.ccxn import CCXN"
"from topomodelx.nn.cell.ccxn import CCXN\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -194,8 +195,8 @@
"\n",
" incidence_2_t = cell_complex.incidence_matrix(rank=2).T\n",
" adjacency_0 = cell_complex.adjacency_matrix(rank=0)\n",
" incidence_2_t = torch.from_numpy(incidence_2_t.todense()).to_sparse()\n",
" adjacency_0 = torch.from_numpy(adjacency_0.todense()).to_sparse()\n",
" incidence_2_t = from_sparse(incidence_2_t)\n",
" adjacency_0 = from_sparse(adjacency_0)\n",
" incidence_2_t_list.append(incidence_2_t)\n",
" adjacency_0_list.append(adjacency_0)"
]
tutorials/cell/cwn_train.ipynb (9 changes: 5 additions & 4 deletions)
@@ -61,7 +61,8 @@
"\n",
"import toponetx.datasets as datasets\n",
"\n",
"from topomodelx.nn.cell.cwn import CWN"
"from topomodelx.nn.cell.cwn import CWN\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -222,9 +223,9 @@
" adjacency_1 = cell_complex.adjacency_matrix(rank=1)\n",
" incidence_1_t = cell_complex.incidence_matrix(rank=1).T\n",
"\n",
" incidence_2 = torch.from_numpy(incidence_2.todense()).to_sparse()\n",
" adjacency_1 = torch.from_numpy(adjacency_1.todense()).to_sparse()\n",
" incidence_1_t = torch.from_numpy(incidence_1_t.todense()).to_sparse()\n",
" incidence_2 = from_sparse(incidence_2)\n",
" adjacency_1 = from_sparse(adjacency_1)\n",
" incidence_1_t = from_sparse(incidence_1_t)\n",
"\n",
" incidence_2_list.append(incidence_2)\n",
" adjacency_1_list.append(adjacency_1)\n",
tutorials/hypergraph/allset_train.ipynb (7 changes: 4 additions & 3 deletions)
@@ -59,7 +59,8 @@
"from sklearn.model_selection import train_test_split\n",
"import toponetx.datasets as datasets\n",
"\n",
"from topomodelx.nn.hypergraph.allset import AllSet"
"from topomodelx.nn.hypergraph.allset import AllSet\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -196,7 +197,7 @@
"incidence_1_list = []\n",
"for simplex in simplexes:\n",
" incidence_1 = simplex.incidence_matrix(rank=1, signed=False)\n",
" # incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" # incidence_1 = from_sparse(incidence_1)\n",
" # incidence_1_list.append(incidence_1)\n",
" hg = simplex.to_hypergraph()\n",
" hg_list.append(hg)\n",
@@ -205,7 +206,7 @@
"# Extract hypergraphs incident matrices from collected hypergraphs\n",
"for hg in hg_list:\n",
" incidence_1 = hg.incidence_matrix()\n",
" incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" incidence_1 = from_sparse(incidence_1)\n",
" incidence_1_list.append(incidence_1)"
]
},
tutorials/hypergraph/allset_transformer_train.ipynb (3 changes: 2 additions & 1 deletion)
@@ -91,6 +91,7 @@
"from sklearn.model_selection import train_test_split\n",
"\n",
"from topomodelx.nn.hypergraph.allset_transformer import AllSetTransformer\n",
"from topomodelx.utils.sparse import from_sparse\n",
"\n",
"# %load_ext autoreload\n",
"# %autoreload 2"
@@ -236,7 +237,7 @@
"# Extract hypergraphs incident matrices from collected hypergraphs\n",
"for hg in hg_list:\n",
" incidence_1 = hg.incidence_matrix()\n",
" incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" incidence_1 = from_sparse(incidence_1)\n",
" incidence_1_list.append(incidence_1)"
]
},
tutorials/hypergraph/dhgcn_train.ipynb (5 changes: 3 additions & 2 deletions)
@@ -26,7 +26,8 @@
"from sklearn.model_selection import train_test_split\n",
"\n",
"import toponetx.datasets as datasets\n",
"from topomodelx.nn.hypergraph.dhgcn import DHGCN"
"from topomodelx.nn.hypergraph.dhgcn import DHGCN\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -204,7 +205,7 @@
"incidence_1_list = []\n",
"for simplex in simplexes:\n",
" incidence_1 = simplex.incidence_matrix(rank=1, signed=False)\n",
" incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" incidence_1 = from_sparse(incidence_1)\n",
" incidence_1_list.append(incidence_1)\n",
" hg = simplex.to_hypergraph()\n",
" hg_list.append(hg)"
tutorials/hypergraph/hnhn_train.ipynb (5 changes: 3 additions & 2 deletions)
@@ -44,7 +44,8 @@
"import toponetx.datasets.graph as graph\n",
"from topomodelx.nn.hypergraph.hnhn_layer import HNHNLayer\n",
"import matplotlib.pyplot as plt\n",
"from topomodelx.nn.hypergraph.hnhn import HNHN, HNHNNetwork"
"from topomodelx.nn.hypergraph.hnhn import HNHN, HNHNNetwork\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -119,7 +120,7 @@
],
"source": [
"incidence_1 = dataset_sim.incidence_matrix(rank=1, signed=False)\n",
"incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
"incidence_1 = from_sparse(incidence_1)\n",
"print(f\"The incidence matrix B1 has shape: {incidence_1.shape}.\")"
]
},
tutorials/hypergraph/hypergat_train.ipynb (5 changes: 3 additions & 2 deletions)
@@ -59,7 +59,8 @@
"import toponetx.datasets as datasets\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from topomodelx.nn.hypergraph.hypergat import HyperGAT"
"from topomodelx.nn.hypergraph.hypergat import HyperGAT\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -177,7 +178,7 @@
"# Extract hypergraphs incident matrices from collected hypergraphs\n",
"for hg in hg_list:\n",
" incidence_1 = hg.incidence_matrix()\n",
" incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" incidence_1 = from_sparse(incidence_1)\n",
" incidence_1_list.append(incidence_1)"
]
},
tutorials/hypergraph/hypersage_train.ipynb (5 changes: 3 additions & 2 deletions)
@@ -46,7 +46,8 @@
"import toponetx.datasets as datasets\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from topomodelx.nn.hypergraph.hypersage import HyperSAGE"
"from topomodelx.nn.hypergraph.hypersage import HyperSAGE\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -164,7 +165,7 @@
"# Extract hypergraphs incident matrices from collected hypergraphs\n",
"for hg in hg_list:\n",
" incidence_1 = hg.incidence_matrix()\n",
" incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" incidence_1 = from_sparse(incidence_1)\n",
" incidence_1_list.append(incidence_1)"
]
},
tutorials/hypergraph/unigin_train.ipynb (5 changes: 3 additions & 2 deletions)
@@ -12,7 +12,8 @@
"from torch_geometric.datasets import TUDataset\n",
"from torch_geometric.utils.convert import to_networkx\n",
"from toponetx.classes.simplicial_complex import SimplicialComplex\n",
"from topomodelx.nn.hypergraph.unigin import UniGIN"
"from topomodelx.nn.hypergraph.unigin import UniGIN\n",
"from topomodelx.utils.sparse import from_sparse"
]
},
{
@@ -58,7 +59,7 @@
"incidence_1_list = []\n",
"for hg in hg_list:\n",
" incidence_1 = hg.incidence_matrix()\n",
" incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
" incidence_1 = from_sparse(incidence_1)\n",
" incidence_1_list.append(incidence_1)"
]
},
tutorials/simplicial/dist2cycle_train.ipynb (5 changes: 3 additions & 2 deletions)
@@ -38,6 +38,7 @@
"import toponetx.datasets.graph as graph\n",
"\n",
"from topomodelx.nn.simplicial.dist2cycle import Dist2Cycle\n",
"from topomodelx.utils.sparse import from_sparse\n",
"import numpy.linalg as npla"
]
},
@@ -101,8 +102,8 @@
"incidence_1 = dataset.incidence_matrix(rank=1)\n",
"adjacency_0 = dataset.adjacency_matrix(rank=0)\n",
"\n",
"incidence_1 = torch.from_numpy(incidence_1.todense()).to_sparse()\n",
"adjacency_0 = torch.from_numpy(adjacency_0.todense()).to_sparse()\n",
"incidence_1 = from_sparse(incidence_1)\n",
"adjacency_0 = from_sparse(adjacency_0)\n",
"\n",
"print(f\"The incidence matrix B1 has shape: {incidence_1.shape}.\")\n",
"print(f\"The adjacency matrix A0 has shape: {adjacency_0.shape}.\")"
(remaining changed files not loaded)
