From 1c0d6274901d980e0f0548cd471f216c801d7814 Mon Sep 17 00:00:00 2001
From: Link
Date: Mon, 23 Oct 2017 11:43:28 +0800
Subject: [PATCH 1/7] Update

---
 src/mnist/dg_mnist.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/mnist/dg_mnist.py b/src/mnist/dg_mnist.py
index 3405096..1b227dc 100644
--- a/src/mnist/dg_mnist.py
+++ b/src/mnist/dg_mnist.py
@@ -25,13 +25,13 @@ def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False
     num_inputs=df_dim*4
     theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
     log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
-    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
+    W = tf.multiply(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
     W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
     x = input
     x=tf.reshape(x, [batchsize,num_inputs])
     activation = tf.matmul(x, W)
     activation = tf.reshape(activation,[-1,num_kernels,dim_per_kernel])
-    abs_dif = tf.mul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
+    abs_dif = tf.multiply(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
                 1-tf.expand_dims(tf.constant(np.eye(batchsize),dtype=np.float32),1))
     f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
     print(f.get_shape())
@@ -118,7 +118,7 @@ def generator(z):
     # Our Mixture Model modifications
     zin = tf.get_variable("g_z", [batchsize, z_dim],initializer=tf.random_uniform_initializer(-1,1))
     zsig = tf.get_variable("g_sig", [batchsize, z_dim],initializer=tf.constant_initializer(0.2))
-    inp = tf.add(zin,tf.mul(z,zsig))
+    inp = tf.add(zin,tf.multiply(z,zsig))
     # inp = z # Uncomment this line when training/testing baseline GAN
     G = generator(inp)
     D_prob, D_logit = discriminator(images)

From 102ea2809621f502d5efc2a6a53f7095444d0311 Mon Sep 17 00:00:00 2001
From: Link
Date: Mon, 23 Oct 2017 11:43:50 +0800
Subject: [PATCH 2/7] Update

---
 src/mnist/dg_mnist.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mnist/dg_mnist.py b/src/mnist/dg_mnist.py
index 1b227dc..00820a0 100644
--- a/src/mnist/dg_mnist.py
+++ b/src/mnist/dg_mnist.py
@@ -31,7 +31,7 @@ def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False
     x=tf.reshape(x, [batchsize,num_inputs])
     activation = tf.matmul(x, W)
     activation = tf.reshape(activation,[-1,num_kernels,dim_per_kernel])
-    abs_dif = tf.multiply(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
+    abs_dif = tf.multiply(tf.reduce_sum(tf.abs(tf.subtract(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
                 1-tf.expand_dims(tf.constant(np.eye(batchsize),dtype=np.float32),1))
     f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
     print(f.get_shape())
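
Note on patches 1 and 2: the tf.mul -> tf.multiply and tf.sub -> tf.subtract
renames are purely mechanical; the operations behave identically. A minimal
sketch of the mapping, assuming TensorFlow >= 1.0 (the tensors are
illustrative, not taken from the patched files):

    import tensorflow as tf

    a = tf.constant([1.0, 2.0])
    b = tf.constant([3.0, 4.0])

    prod = tf.multiply(a, b)   # was tf.mul(a, b) before the 1.0 release
    diff = tf.subtract(a, b)   # was tf.sub(a, b) before the 1.0 release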
From 15697ea4050489aae12788b62beb79d717b03432 Mon Sep 17 00:00:00 2001
From: Link
Date: Mon, 23 Oct 2017 11:44:25 +0800
Subject: [PATCH 3/7] Update

---
 src/mnist/dg_mnist.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mnist/dg_mnist.py b/src/mnist/dg_mnist.py
index 00820a0..0dd2abb 100644
--- a/src/mnist/dg_mnist.py
+++ b/src/mnist/dg_mnist.py
@@ -25,7 +25,7 @@ def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False
     num_inputs=df_dim*4
     theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
     log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
-    W = tf.multiply(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
+    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
     W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
     x = input
     x=tf.reshape(x, [batchsize,num_inputs])

From acbd28b547c9cbb24312311c7cb08b87552500a0 Mon Sep 17 00:00:00 2001
From: Link
Date: Mon, 23 Oct 2017 11:45:18 +0800
Subject: [PATCH 4/7] Update tensorflow 1.0.0

tf.mul and tf.sub have been replaced with tf.multiply and tf.subtract in
the 1.0 release
---
 src/mnist/dg_mnist.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mnist/dg_mnist.py b/src/mnist/dg_mnist.py
index 0dd2abb..00820a0 100644
--- a/src/mnist/dg_mnist.py
+++ b/src/mnist/dg_mnist.py
@@ -25,7 +25,7 @@ def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False
     num_inputs=df_dim*4
     theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
     log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
-    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
+    W = tf.multiply(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
     W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
     x = input
     x=tf.reshape(x, [batchsize,num_inputs])

From 39ae077a1b75ff8489c7ccd2bc5c4066f4d37771 Mon Sep 17 00:00:00 2001
From: Link
Date: Tue, 24 Oct 2017 20:25:04 +0800
Subject: [PATCH 5/7] Update

---
 src/mnist/dg_mnist.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mnist/dg_mnist.py b/src/mnist/dg_mnist.py
index 00820a0..4f568d5 100644
--- a/src/mnist/dg_mnist.py
+++ b/src/mnist/dg_mnist.py
@@ -36,7 +36,7 @@ def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False
     f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
     print(f.get_shape())
     print(input.get_shape())
-    return tf.concat(1,[x, f])
+    return tf.concat([x,f], 1)
 
 
 def linear(x,output_dim, name="linear"):

From c86b9fdb9e50b1ca98053e0930398f2d7ee6fb46 Mon Sep 17 00:00:00 2001
From: Link
Date: Tue, 24 Oct 2017 20:32:02 +0800
Subject: [PATCH 6/7] Update

---
 src/mnist/dg_mnist.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/mnist/dg_mnist.py b/src/mnist/dg_mnist.py
index 4f568d5..65986fb 100644
--- a/src/mnist/dg_mnist.py
+++ b/src/mnist/dg_mnist.py
@@ -125,11 +125,11 @@ def generator(z):
     D_fake_prob, D_fake_logit = discriminator(G, Reuse=True)
 
 
-    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_logit, tf.ones_like(D_logit)))
-    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_fake_logit, tf.zeros_like(D_fake_logit)))
+    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit, labels=tf.ones_like(D_logit)))
+    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logit, labels=tf.zeros_like(D_fake_logit)))
 
     sigma_loss = tf.reduce_mean(tf.square(zsig-1))/3 # sigma regularizer
-    gloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_fake_logit, tf.ones_like(D_fake_logit)))
+    gloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logit, labels=tf.ones_like(D_fake_logit)))
     dloss = d_loss_real + d_loss_fake
 
     t_vars = tf.trainable_variables()
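
Note on patch 6: unlike the renames, this migration is not mechanical. Before
1.0 the signature was tf.nn.sigmoid_cross_entropy_with_logits(logits, targets);
from 1.0 the arguments must be passed by keyword, and swapping them still runs
but silently computes the wrong loss, so the raw score maps to logits= and the
target tensor maps to labels=. A minimal sketch of the correct mapping, with an
illustrative stand-in for the discriminator output:

    import tensorflow as tf

    D_logit = tf.constant([0.3, -1.2])  # illustrative discriminator logits

    # TF < 1.0 (positional):
    #   tf.nn.sigmoid_cross_entropy_with_logits(D_logit, tf.ones_like(D_logit))
    # TF >= 1.0 (keyword-only), same computation:
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit,
                                                labels=tf.ones_like(D_logit)))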
From f5cf1993da910a6fff1fd9aa3d24d0f75094795b Mon Sep 17 00:00:00 2001
From: Link
Date: Wed, 25 Oct 2017 09:43:05 +0800
Subject: [PATCH 7/7] Update

---
 src/Toy/dg_toy.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/Toy/dg_toy.py b/src/Toy/dg_toy.py
index c6f1bfd..4db16c2 100644
--- a/src/Toy/dg_toy.py
+++ b/src/Toy/dg_toy.py
@@ -51,7 +51,7 @@ def generator(z, n):
 
     zin = tf.get_variable("g_z", [batchsize, z_dim],initializer=tf.random_uniform_initializer(-1,1))
     zsig = tf.get_variable("g_sig", [batchsize, z_dim],initializer=tf.constant_initializer(0.02))
-    inp = tf.add(zin,tf.mul(z,zsig)) #Uncomment this line for testing the DeliGAN
+    inp = tf.add(zin,tf.multiply(z,zsig)) #Uncomment this line for testing the DeliGAN
     #moe = tf.eye(batchsize) #Uncomment this line for testing the MoE-GAN
     #inp = tf.concat_v2([moe, z],1) #Uncomment this line for testing the MoE-GAN
 
@@ -70,9 +70,9 @@ def generator(z, n):
 
     # Defining Losses
     sig_loss = 0.1*tf.reduce_mean(tf.square(zsig-1))
-    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_logit, tf.ones_like(D_logit)))
-    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_fake_logit, tf.zeros_like(D_fake_logit)))
-    gloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_fake_logit, tf.ones_like(D_fake_logit)))
+    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit, labels=tf.ones_like(D_logit)))
+    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logit, labels=tf.zeros_like(D_fake_logit)))
+    gloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logit, labels=tf.ones_like(D_fake_logit)))
     gloss1 = gloss+sig_loss
     dloss = d_loss_real + d_loss_fake
 
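
Note on patch 5: tf.concat also swapped its argument order in 1.0, from
tf.concat(axis, values) to tf.concat(values, axis). A minimal sketch with
illustrative shapes, not taken from the patched files:

    import tensorflow as tf

    x = tf.ones([4, 2])
    f = tf.zeros([4, 3])

    joined = tf.concat([x, f], 1)    # TF >= 1.0: values first, then axis
    # joined = tf.concat(1, [x, f])  # TF < 1.0: axis came first
    print(joined.get_shape())        # prints (4, 5)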