W&B Integration #216

Open · wants to merge 14 commits into master
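
This PR adds Weights & Biases experiment tracking to the aae, acgan, and bgan example scripts. Each script gains argparse flags for the W&B entity, project, and core hyperparameters; migrates its imports from standalone keras to tensorflow.keras; logs per-epoch losses with wandb.log and periodically logs grids of generated images; and aae.py additionally replaces the removed merge(..., mode=...) API with a Lambda-wrapped sampling function. The shared tracking pattern is sketched below; this is a minimal illustration rather than the exact code, and train_step is a hypothetical stand-in for each script's real discriminator/generator update:

import argparse

import wandb


def train_step():
    """Hypothetical stand-in for one discriminator/generator update."""
    return 0.0, 0.0


parser = argparse.ArgumentParser()
parser.add_argument('--entity', type=str, help="provide the wandb entity")
parser.add_argument('--project', type=str, help="provide the wandb project name")
parser.add_argument('--epochs', type=int, default=100, help="number of epochs")
args = parser.parse_args()

# Start a tracked run; values stored on wandb.config are saved with the run.
wandb.init(entity=args.entity, project=args.project)
config = wandb.config
config.epochs = args.epochs

for epoch in range(config.epochs):
    d_loss, g_loss = train_step()
    # Stream scalar metrics once per epoch; they appear as live charts.
    wandb.log({'epoch': epoch, 'discriminator_loss': d_loss, 'generator_loss': g_loss})

Storing hyperparameters on wandb.config rather than in plain locals records them with the run, so runs launched with different settings stay comparable in the W&B UI.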
73 changes: 53 additions & 20 deletions aae/aae.py
@@ -1,28 +1,47 @@
from __future__ import print_function, division
## USAGE: python aae.py --entity your-wandb-id --project your-project --latentdim 10 --epochs 20000

from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D, merge
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import losses
from keras.utils import to_categorical
import keras.backend as K
from __future__ import print_function, division

import argparse
import numpy as np
import matplotlib.pyplot as plt

import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Lambda
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.backend as K


import wandb
from wandb.keras import WandbCallback
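# NOTE: WandbCallback plugs into Keras model.fit(); these scripts train GANs with
# custom loops, so metrics are instead logged manually via wandb.log calls below.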

parser = argparse.ArgumentParser()
parser.add_argument('--entity', type=str,
                    help="provide the wandb entity")
parser.add_argument('--project', type=str,
                    help="provide the wandb project name")
parser.add_argument('--latentdim', type=int, default=10,
                    help="specify the latent dimensions")
parser.add_argument("--epochs", type=int, default=20000,
                    help="number of epochs")
parser.add_argument("--batch", type=int, default=32,
                    help="batch size to be used")
parser.add_argument("--gen_interval", type=int, default=10,
                    help="log generated images after this interval")
args = parser.parse_args()

class AdversarialAutoencoder():
    def __init__(self):
    def __init__(self, latent_dim):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 10
        self.latent_dim = latent_dim

        optimizer = Adam(0.0002, 0.5)

@@ -67,12 +86,15 @@ def build_encoder(self):
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        latent_repr = merge([mu, log_var],
                            mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                            output_shape=lambda p: p[0])
        latent_repr = Lambda(self.latent, output_shape=(self.latent_dim, ))([mu, log_var])

        return Model(img, latent_repr)

    def latent(self, p):
        """Sample based on `mu` and `log_var`"""
        mu, log_var = p
        return mu + K.random_normal(K.shape(mu)) * K.exp(log_var / 2)

    def build_decoder(self):

        model = Sequential()
@@ -147,6 +169,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):

            # Plot the progress
            print("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss[0], 'mse': g_loss[1]})

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
@@ -168,6 +191,7 @@ def sample_images(self, epoch):
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("images/mnist_%d.png" % epoch)
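        # Logging `plt` hands wandb the current matplotlib figure, which is stored
        # as an image alongside the run's metrics.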
        wandb.log({'aae_generated_imgs': plt})
        plt.close()

    def save_model(self):
@@ -186,5 +210,14 @@ def save(model, model_name):


if __name__ == '__main__':
    aae = AdversarialAutoencoder()
    aae.train(epochs=20000, batch_size=32, sample_interval=200)

    wandb.init(entity=args.entity, project=args.project)
    config = wandb.config
    config.epochs = args.epochs
    config.batch_size = args.batch
    config.save_interval = args.gen_interval

    config.latent_dim = args.latentdim

    aae = AdversarialAutoencoder(config.latent_dim)
    aae.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
65 changes: 52 additions & 13 deletions acgan/acgan.py
@@ -1,26 +1,50 @@
from __future__ import print_function, division
## USAGE: python acgan.py --entity your-wandb-id --project your-project --latentdim 10 --epochs 14000

from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from __future__ import print_function, division

import argparse
import numpy as np
import matplotlib.pyplot as plt

import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.backend as K


import wandb
from wandb.keras import WandbCallback

parser = argparse.ArgumentParser()
parser.add_argument('--entity', type=str,
                    help="provide the wandb entity")
parser.add_argument('--project', type=str,
                    help="provide the wandb project name")
parser.add_argument('--latentdim', type=int, default=10,
                    help="specify the latent dimensions")
parser.add_argument("--epochs", type=int, default=14000,
                    help="number of epochs")
parser.add_argument("--batch", type=int, default=32,
                    help="batch size to be used")
parser.add_argument("--gen_interval", type=int, default=10,
                    help="log generated images after this interval")
args = parser.parse_args()


class ACGAN():
    def __init__(self):
    def __init__(self, latent_dim):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = 10
        self.latent_dim = 100
        self.latent_dim = latent_dim

        optimizer = Adam(0.0002, 0.5)
        losses = ['binary_crossentropy', 'sparse_categorical_crossentropy']
@@ -167,6 +191,8 @@ def train(self, epochs, batch_size=128, sample_interval=50):

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%, op_acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[3], 100*d_loss[4], g_loss[0]))
            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[3], 'op_accuracy': 100*d_loss[4], 'generator_loss': g_loss[0]})

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
@@ -189,6 +215,7 @@ def sample_images(self, epoch):
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("images/%d.png" % epoch)
        wandb.log({'acgan_generated_imgs': plt})
        plt.close()

    def save_model(self):
@@ -207,5 +234,17 @@ def save(model, model_name):


if __name__ == '__main__':
    acgan = ACGAN()
    acgan.train(epochs=14000, batch_size=32, sample_interval=200)

    wandb.init(entity=args.entity, project=args.project)
    config = wandb.config

    config.epochs = args.epochs
    config.batch_size = args.batch
    config.save_interval = args.gen_interval

    config.latent_dim = args.latentdim

    acgan = ACGAN(config.latent_dim)
    acgan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
63 changes: 48 additions & 15 deletions bgan/bgan.py
@@ -1,28 +1,49 @@
from __future__ import print_function, division
## USAGE: python bgan.py --entity your-wandb-id --project your-project --latentdim 100 --epochs 30000

from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import keras.backend as K
from __future__ import print_function, division

import argparse
import numpy as np
import matplotlib.pyplot as plt

import sys
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.backend as K


import wandb
from wandb.keras import WandbCallback

parser = argparse.ArgumentParser()
parser.add_argument('--entity', type=str,
                    help="provide the wandb entity")
parser.add_argument('--project', type=str,
                    help="provide the wandb project name")
parser.add_argument('--latentdim', type=int, default=100,
                    help="specify the latent dimensions")
parser.add_argument("--epochs", type=int, default=30000,
                    help="number of epochs")
parser.add_argument("--batch", type=int, default=32,
                    help="batch size to be used")
parser.add_argument("--gen_interval", type=int, default=10,
                    help="log generated images after this interval")
args = parser.parse_args()

import numpy as np

class BGAN():
    """Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/"""
    def __init__(self):
    def __init__(self, latent_dim):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        self.latent_dim = latent_dim

        optimizer = Adam(0.0002, 0.5)

@@ -139,6 +160,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss})

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
@@ -159,9 +181,20 @@ def sample_images(self, epoch):
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("images/mnist_%d.png" % epoch)
        wandb.log({'bgan_generated_imgs': plt})
        plt.close()


if __name__ == '__main__':
    bgan = BGAN()
    bgan.train(epochs=30000, batch_size=32, sample_interval=200)

    wandb.init(entity=args.entity, project=args.project)
    config = wandb.config

    config.epochs = args.epochs
    config.batch_size = args.batch
    config.save_interval = args.gen_interval

    config.latent_dim = args.latentdim

    bgan = BGAN(config.latent_dim)
    bgan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)