Reformatting using black, changing generation of indices using random seeds, and generating common indices for the new task names and Betti numbers.
rballeba committed May 4, 2024
1 parent 6bd0489 commit 14e8a39
Showing 21 changed files with 369 additions and 251 deletions.
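Nearly all of the visible changes below are whitespace-only: black re-wraps long calls, inserts spaces after commas, and adds trailing commas to multi-line literals. As a minimal sketch, the same rewrite can be reproduced with black's Python API (running it on the notebooks themselves would use the black[jupyter] extra); the black version and configuration behind this commit are not shown, so the Mode() defaults here are an assumption.

```python
# Minimal sketch: reproducing one of the whitespace-only rewrites in this
# commit with black's Python API. The version/config actually used is not
# shown in the diff, so black.Mode() defaults are an assumption.
import black

src = "train_loader = DataLoader(train_dataset,batch_size=10)\n"
print(black.format_str(src, mode=black.Mode()))
# -> train_loader = DataLoader(train_dataset, batch_size=10)
```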
30 changes: 21 additions & 9 deletions experiment.ipynb
@@ -46,21 +46,30 @@
}
],
"source": [
"class NameToClass: \n",
"class NameToClass:\n",
" def __init__(self):\n",
" self.class_dict = {'Klein bottle': 0, '': 1, 'RP^2': 2, 'T^2': 3, 'S^2': 4}\n",
" \n",
" def __call__(self,data):\n",
" data.y = F.one_hot(torch.tensor(self.class_dict[data.name]),num_classes=5)\n",
" self.class_dict = {\n",
" \"Klein bottle\": 0,\n",
" \"\": 1,\n",
" \"RP^2\": 2,\n",
" \"T^2\": 3,\n",
" \"S^2\": 4,\n",
" }\n",
"\n",
" def __call__(self, data):\n",
" data.y = F.one_hot(\n",
" torch.tensor(self.class_dict[data.name]), num_classes=5\n",
" )\n",
" return data\n",
"\n",
"\n",
"tr = transforms.Compose(\n",
" [\n",
" TriangulationToFaceTransform(),\n",
" FaceToEdge(remove_faces=False),\n",
" DegreeTransform(),\n",
" OrientableToClassTransform(),\n",
" NameToClass()\n",
" NameToClass(),\n",
" ]\n",
")\n",
"\n",
@@ -119,6 +128,7 @@
],
"source": [
"from collections import Counter\n",
"\n",
"# Tally occurrences of words in a list\n",
"cnt = Counter()\n",
"for data in dataset:\n",
@@ -148,7 +158,7 @@
"test_dataset = dataset[-150:]\n",
"\n",
"print(f\"Number of training graphs: {len(train_dataset)}\")\n",
"print(f\"Number of test graphs: {len(test_dataset)}\")\n"
"print(f\"Number of test graphs: {len(test_dataset)}\")"
]
},
{
@@ -168,8 +178,10 @@
}
],
"source": [
"train_loader = DataLoader(train_dataset,batch_size=10)#,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset,batch_size=10)\n",
"train_loader = DataLoader(\n",
" train_dataset, batch_size=10\n",
") # ,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset, batch_size=10)\n",
"\n",
"\n",
"for batch in train_loader:\n",
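A quick sanity check of the NameToClass transform reformatted above: a minimal sketch, assuming the class definition from this notebook is in scope. PyG Data objects accept arbitrary attributes, so a stub carrying only a name suffices.

```python
# Minimal sketch: exercising NameToClass on a stub Data object.
import torch
import torch.nn.functional as F
from torch_geometric.data import Data

data = Data()
data.name = "T^2"  # the torus, class index 3 in class_dict
data = NameToClass()(data)
print(data.y)  # tensor([0, 0, 0, 1, 0])
```

The sampler commented out in the DataLoader cell is torch_geometric.loader.ImbalancedSampler; re-enabling it would likely require integer class labels rather than the one-hot data.y produced here.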
34 changes: 20 additions & 14 deletions experiment_betti_TAG.ipynb
@@ -69,7 +69,7 @@
" FaceToEdge(remove_faces=False),\n",
" DegreeTransform(),\n",
" OrientableToClassTransform(),\n",
" OneHotDegree(max_degree=8,cat=False)\n",
" OneHotDegree(max_degree=8, cat=False),\n",
" ]\n",
")\n",
"\n",
@@ -148,7 +158,7 @@
"test_dataset = dataset[-150:]\n",
"\n",
"print(f\"Number of training graphs: {len(train_dataset)}\")\n",
"print(f\"Number of test graphs: {len(test_dataset)}\")\n"
"print(f\"Number of test graphs: {len(test_dataset)}\")"
]
},
{
@@ -168,8 +178,10 @@
}
],
"source": [
"train_loader = DataLoader(train_dataset,batch_size=10)#,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset,batch_size=10)\n",
"train_loader = DataLoader(\n",
" train_dataset, batch_size=10\n",
") # ,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset, batch_size=10)\n",
"\n",
"\n",
"for batch in train_loader:\n",
@@ -204,7 +206,7 @@
"from operator import concat\n",
"from torch.nn import Linear\n",
"import torch.nn.functional as F\n",
"from torch_geometric.nn import GCNConv, TAGConv,TransformerConv\n",
"from torch_geometric.nn import GCNConv, TAGConv, TransformerConv\n",
"from torch_geometric.nn import global_mean_pool\n",
"\n",
"\n",
@@ -236,7 +238,7 @@
"\n",
"\n",
"model = GCN(hidden_channels=64)\n",
"print(model(batch))\n"
"print(model(batch))"
]
},
{
@@ -301,7 +303,9 @@
"\n",
" for data in train_loader: # Iterate in batches over the training dataset.\n",
" out = model(data) # Perform a single forward pass.\n",
" loss = criterion(out, torch.tensor(data.betti_numbers,dtype=torch.float)) # Compute the loss.\n",
" loss = criterion(\n",
" out, torch.tensor(data.betti_numbers, dtype=torch.float)\n",
" ) # Compute the loss.\n",
" loss.backward() # Derive gradients.\n",
" optimizer.step() # Update parameters based on gradients.\n",
" optimizer.zero_grad() # Clear gradients.\n",
@@ -313,8 +317,10 @@
" losses = 0\n",
" for data in loader: # Iterate in batches over the training/test dataset.\n",
" out = model(data)\n",
" \n",
" losses += criterion(out, torch.tensor(data.betti_numbers,dtype=torch.float))\n",
"\n",
" losses += criterion(\n",
" out, torch.tensor(data.betti_numbers, dtype=torch.float)\n",
" )\n",
" return losses\n",
"\n",
"\n",
@@ -356,12 +362,12 @@
"y = torch.vstack(y)\n",
"\n",
"\n",
"incorrect = torch.count_nonzero(y-y_hat!=0).item()\n",
"correct = torch.count_nonzero(y-y_hat==0).item()\n",
"incorrect = torch.count_nonzero(y - y_hat != 0).item()\n",
"correct = torch.count_nonzero(y - y_hat == 0).item()\n",
"\n",
"print(\"predicted correct\",correct)\n",
"print(\"predicted incorrect\",incorrect)\n",
"print(\"percentage correct\", correct / (correct + incorrect))\n"
"print(\"predicted correct\", correct)\n",
"print(\"predicted incorrect\", incorrect)\n",
"print(\"percentage correct\", correct / (correct + incorrect))"
]
},
{
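The train() and test() cells above call criterion on data.betti_numbers, but the cell defining criterion and the optimizer is collapsed out of this diff. A sketch of plausible surrounding setup, assuming a regression loss such as MSELoss and an Adam optimizer; the loss choice, learning rate, and epoch count are all assumptions, not shown in the diff.

```python
# Sketch of the collapsed setup around train()/test(); criterion, optimizer,
# lr, and the epoch count are assumptions, since only their call sites
# appear in the hunks above.
import torch

model = GCN(hidden_channels=64)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.MSELoss()  # regressing the three Betti numbers

for epoch in range(1, 101):
    train()  # the train() defined in the hunk above
    print(f"Epoch {epoch:03d}: train {test(train_loader):.4f}, "
          f"test {test(test_loader):.4f}")
```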
38 changes: 23 additions & 15 deletions experiment_betti_TransformerConv.ipynb
@@ -69,7 +69,7 @@
" FaceToEdge(remove_faces=False),\n",
" DegreeTransform(),\n",
" OrientableToClassTransform(),\n",
" OneHotDegree(max_degree=8,cat=False)\n",
" OneHotDegree(max_degree=8, cat=False),\n",
" ]\n",
")\n",
"\n",
@@ -148,7 +158,7 @@
"test_dataset = dataset[-150:]\n",
"\n",
"print(f\"Number of training graphs: {len(train_dataset)}\")\n",
"print(f\"Number of test graphs: {len(test_dataset)}\")\n"
"print(f\"Number of test graphs: {len(test_dataset)}\")"
]
},
{
@@ -168,8 +178,10 @@
}
],
"source": [
"train_loader = DataLoader(train_dataset,batch_size=10)#,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset,batch_size=10)\n",
"train_loader = DataLoader(\n",
" train_dataset, batch_size=10\n",
") # ,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset, batch_size=10)\n",
"\n",
"\n",
"for batch in train_loader:\n",
@@ -204,15 +206,17 @@
"from operator import concat\n",
"from torch.nn import Linear\n",
"import torch.nn.functional as F\n",
"from torch_geometric.nn import GCNConv, TAGConv,TransformerConv\n",
"from torch_geometric.nn import GCNConv, TAGConv, TransformerConv\n",
"from torch_geometric.nn import global_mean_pool\n",
"\n",
"\n",
"class GCN(torch.nn.Module):\n",
" def __init__(self, hidden_channels):\n",
" super(GCN, self).__init__()\n",
" torch.manual_seed(12345)\n",
" self.conv1 = TransformerConv(dataset.num_node_features, hidden_channels)\n",
" self.conv1 = TransformerConv(\n",
" dataset.num_node_features, hidden_channels\n",
" )\n",
" self.conv2 = TransformerConv(hidden_channels, hidden_channels)\n",
" self.conv3 = TransformerConv(hidden_channels, hidden_channels)\n",
" self.lin = Linear(hidden_channels, 3)\n",
@@ -236,7 +240,7 @@
"\n",
"\n",
"model = GCN(hidden_channels=64)\n",
"print(model(batch))\n"
"print(model(batch))"
]
},
{
@@ -301,7 +305,9 @@
"\n",
" for data in train_loader: # Iterate in batches over the training dataset.\n",
" out = model(data) # Perform a single forward pass.\n",
" loss = criterion(out, torch.tensor(data.betti_numbers,dtype=torch.float)) # Compute the loss.\n",
" loss = criterion(\n",
" out, torch.tensor(data.betti_numbers, dtype=torch.float)\n",
" ) # Compute the loss.\n",
" loss.backward() # Derive gradients.\n",
" optimizer.step() # Update parameters based on gradients.\n",
" optimizer.zero_grad() # Clear gradients.\n",
@@ -313,8 +319,10 @@
" losses = 0\n",
" for data in loader: # Iterate in batches over the training/test dataset.\n",
" out = model(data)\n",
" \n",
" losses += criterion(out, torch.tensor(data.betti_numbers,dtype=torch.float))\n",
"\n",
" losses += criterion(\n",
" out, torch.tensor(data.betti_numbers, dtype=torch.float)\n",
" )\n",
" return losses\n",
"\n",
"\n",
@@ -356,12 +364,12 @@
"y = torch.vstack(y)\n",
"\n",
"\n",
"incorrect = torch.count_nonzero(y-y_hat!=0).item()\n",
"correct = torch.count_nonzero(y-y_hat==0).item()\n",
"incorrect = torch.count_nonzero(y - y_hat != 0).item()\n",
"correct = torch.count_nonzero(y - y_hat == 0).item()\n",
"\n",
"print(\"predicted correct\",correct)\n",
"print(\"predicted incorrect\",incorrect)\n",
"print(\"percentage correct\", correct / (correct + incorrect))\n"
"print(\"predicted correct\", correct)\n",
"print(\"predicted incorrect\", incorrect)\n",
"print(\"percentage correct\", correct / (correct + incorrect))"
]
},
{
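The model hunk in this file shows the class's __init__, but its forward is collapsed. A hypothetical completion, consistent with the visible pieces (three TransformerConv layers, the global_mean_pool import, and a Linear(hidden_channels, 3) head for the three Betti numbers); the hidden lines may differ.

```python
# Hypothetical completion of the collapsed GCN class above: only __init__ is
# visible in the diff, so forward() is a guess consistent with the imports
# and the 3-unit linear head.
import torch
from torch.nn import Linear
from torch_geometric.nn import TransformerConv, global_mean_pool


class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = TransformerConv(
            dataset.num_node_features, hidden_channels
        )
        self.conv2 = TransformerConv(hidden_channels, hidden_channels)
        self.conv3 = TransformerConv(hidden_channels, hidden_channels)
        self.lin = Linear(hidden_channels, 3)

    def forward(self, data):
        x = self.conv1(data.x, data.edge_index).relu()
        x = self.conv2(x, data.edge_index).relu()
        x = self.conv3(x, data.edge_index)
        x = global_mean_pool(x, data.batch)  # one embedding per graph
        return self.lin(x)
```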
34 changes: 20 additions & 14 deletions experiment_betti_gnn.ipynb
@@ -69,7 +69,7 @@
" FaceToEdge(remove_faces=False),\n",
" DegreeTransform(),\n",
" OrientableToClassTransform(),\n",
" OneHotDegree(max_degree=8,cat=False)\n",
" OneHotDegree(max_degree=8, cat=False),\n",
" ]\n",
")\n",
"\n",
@@ -148,7 +158,7 @@
"test_dataset = dataset[-150:]\n",
"\n",
"print(f\"Number of training graphs: {len(train_dataset)}\")\n",
"print(f\"Number of test graphs: {len(test_dataset)}\")\n"
"print(f\"Number of test graphs: {len(test_dataset)}\")"
]
},
{
@@ -168,8 +178,10 @@
}
],
"source": [
"train_loader = DataLoader(train_dataset,batch_size=10)#,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset,batch_size=10)\n",
"train_loader = DataLoader(\n",
" train_dataset, batch_size=10\n",
") # ,sampler=ImbalancedSampler(train_dataset))\n",
"test_loader = DataLoader(test_dataset, batch_size=10)\n",
"\n",
"\n",
"for batch in train_loader:\n",
@@ -204,7 +206,7 @@
"from operator import concat\n",
"from torch.nn import Linear\n",
"import torch.nn.functional as F\n",
"from torch_geometric.nn import GCNConv, TAGConv,TransformerConv\n",
"from torch_geometric.nn import GCNConv, TAGConv, TransformerConv\n",
"from torch_geometric.nn import global_mean_pool\n",
"\n",
"\n",
@@ -236,7 +238,7 @@
"\n",
"\n",
"model = GCN(hidden_channels=64)\n",
"print(model(batch))\n"
"print(model(batch))"
]
},
{
@@ -301,7 +303,9 @@
"\n",
" for data in train_loader: # Iterate in batches over the training dataset.\n",
" out = model(data) # Perform a single forward pass.\n",
" loss = criterion(out, torch.tensor(data.betti_numbers,dtype=torch.float)) # Compute the loss.\n",
" loss = criterion(\n",
" out, torch.tensor(data.betti_numbers, dtype=torch.float)\n",
" ) # Compute the loss.\n",
" loss.backward() # Derive gradients.\n",
" optimizer.step() # Update parameters based on gradients.\n",
" optimizer.zero_grad() # Clear gradients.\n",
@@ -313,8 +317,10 @@
" losses = 0\n",
" for data in loader: # Iterate in batches over the training/test dataset.\n",
" out = model(data)\n",
" \n",
" losses += criterion(out, torch.tensor(data.betti_numbers,dtype=torch.float))\n",
"\n",
" losses += criterion(\n",
" out, torch.tensor(data.betti_numbers, dtype=torch.float)\n",
" )\n",
" return losses\n",
"\n",
"\n",
@@ -356,12 +362,12 @@
"y = torch.vstack(y)\n",
"\n",
"\n",
"incorrect = torch.count_nonzero(y-y_hat!=0).item()\n",
"correct = torch.count_nonzero(y-y_hat==0).item()\n",
"incorrect = torch.count_nonzero(y - y_hat != 0).item()\n",
"correct = torch.count_nonzero(y - y_hat == 0).item()\n",
"\n",
"print(\"predicted correct\",correct)\n",
"print(\"predicted incorrect\",incorrect)\n",
"print(\"percentage correct\", correct / (correct + incorrect))\n"
"print(\"predicted correct\", correct)\n",
"print(\"predicted incorrect\", incorrect)\n",
"print(\"percentage correct\", correct / (correct + incorrect))"
]
},
{
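Each notebook's final hunk counts elementwise matches between y and y_hat, but the cell that assembles those tensors is collapsed. A hypothetical sketch follows; the rounding step is an assumption, since the model emits continuous values while Betti numbers are integers.

```python
# Hypothetical: assembling y / y_hat for the elementwise count above; the
# round-to-integer step is an assumption not visible in the diff.
import torch

y_hat, y = [], []
with torch.no_grad():
    for data in test_loader:
        y_hat.append(model(data).round().long())
        y.append(torch.tensor(data.betti_numbers, dtype=torch.long))
y_hat = torch.vstack(y_hat)
y = torch.vstack(y)
print("percentage correct", (y == y_hat).float().mean().item())
```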
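The commit message's other two changes, seeded index generation and common indices shared between the name and Betti-number tasks, live in the 17 changed files not rendered on this page. A hypothetical sketch of the idea; the seed value and split sizes are illustrative.

```python
# Hypothetical sketch of seeded, shared split indices as described in the
# commit message; the real implementation is in files not shown here.
import torch

generator = torch.Generator().manual_seed(0)  # seed value is illustrative
perm = torch.randperm(len(dataset), generator=generator)
train_idx, test_idx = perm[:-150], perm[-150:]  # mirrors the 150-graph test set

# Reusing the same perm for both the name task and the Betti-number task
# keeps their train/test splits aligned.
```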