
Commit

train
Kye committed Oct 6, 2023
1 parent bd7dbfd commit c34e294
Showing 2 changed files with 60 additions and 3 deletions.
1 change: 1 addition & 0 deletions swarms_torch/cellular_transformer.py
@@ -89,3 +89,4 @@ def forward(self, x):

            x = cell(x, neighbors)
        return x

62 changes: 59 additions & 3 deletions swarms_torch/graph_cellular_automa.py
@@ -70,15 +70,71 @@ def forward(self, node_embeddings, adjacency_matrix):

        return updated_embeddings, replication_decisions, edge_weights

# # Usage examples
# embedding_dim = 16
# hidden_dim = 32
# node_embeddings = torch.rand((10, embedding_dim)) # For 10 nodes
# adjacency_matrix = torch.rand((10, 10)) # Dummy adjacency matrix for 10 nodes

# model = NDP(embedding_dim, hidden_dim)
# updated_embeddings, replication_decisions, edge_weights = model(node_embeddings, adjacency_matrix)

# print(updated_embeddings.shape)
# print(replication_decisions.shape)
# print(edge_weights.shape)



import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms



# Define the training function.
# NOTE: NDP.forward takes (node_embeddings, adjacency_matrix) and returns a tuple,
# so the committed call `model(data)` would fail; the image-to-graph view and the
# `readout` head below are assumptions added to make the example runnable.
def train(model, readout, train_loader, optimizer, criterion):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # View each 28x28 MNIST image as 49 nodes with 16 features (49 * 16 = 784)
        nodes = data.view(data.size(0), 49, 16)
        logits = []
        for sample in nodes:  # NDP appears to process one graph at a time
            adjacency = torch.ones(49, 49)  # dummy fully connected adjacency
            updated, _, _ = model(sample, adjacency)
            logits.append(readout(updated.mean(dim=0)))  # pool nodes into class logits
        loss = criterion(torch.stack(logits), target)
        loss.backward()
        optimizer.step()

# Set hyperparameters
embedding_dim = 16
hidden_dim = 32
learning_rate = 0.001
batch_size = 64
epochs = 10

# Load MNIST dataset
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# Initialize the model, an assumed readout head, the optimizer, and the loss function
model = NDP(embedding_dim, hidden_dim)
readout = nn.Linear(embedding_dim, 10)  # assumed classification head, not part of NDP
optimizer = optim.Adam(list(model.parameters()) + list(readout.parameters()), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# Training loop
for epoch in range(epochs):
    train(model, readout, train_loader, optimizer, criterion)
    print(f"Epoch {epoch + 1}/{epochs} completed")

# Usage examples
node_embeddings = torch.rand((10, embedding_dim)) # For 10 nodes
adjacency_matrix = torch.rand((10, 10)) # Dummy adjacency matrix for 10 nodes

model = NDP(embedding_dim, hidden_dim)  # note: a fresh, untrained instance
updated_embeddings, replication_decisions, edge_weights = model(node_embeddings, adjacency_matrix)

print(updated_embeddings.shape)
print(replication_decisions.shape)
print(edge_weights.shape)
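
# Sanity checks (assumptions: NDP preserves the node count in its first output
# and predicts a square edge-weight matrix over the nodes)
assert updated_embeddings.shape[0] == node_embeddings.shape[0]
assert edge_weights.shape[-1] == edge_weights.shape[-2]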
