Convert to Python 3; update README for Windows #31

Open · wants to merge 11 commits into base: master
13 changes: 13 additions & 0 deletions README.md
@@ -24,3 +24,16 @@ download URL is in the file.
- `python gan_64x64.py`: 64x64 architectures (this code trains on ImageNet instead of LSUN bedrooms in the paper)
- `python gan_language.py`: Character-level language model
- `python gan_cifar.py`: CIFAR-10


## Installation (Windows)

Install Anaconda: https://www.continuum.io/downloads#windows

Install TensorFlow for Windows: https://www.tensorflow.org/install/install_windows

Then install the remaining dependencies:

- `conda install Pillow`
- `conda install -c anaconda scipy=0.19.0`
- `conda install scikit-learn`
- `conda install matplotlib`
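A quick way to sanity-check the environment after the installs above (run from the Anaconda prompt). This is an illustrative snippet, not part of the repo; note that the import names differ from the conda package names (`Pillow` → `PIL`, `scikit-learn` → `sklearn`):

```python
# Verify the Windows setup: all imports should succeed without error.
import PIL            # installed as Pillow
import sklearn        # installed as scikit-learn
import matplotlib
import scipy
import tensorflow as tf

print("TensorFlow:", tf.__version__)   # these scripts predate TF 2.x, so a 1.x version is expected
print("SciPy:", scipy.__version__)     # pinned to 0.19.0 above
```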
70 changes: 35 additions & 35 deletions gan_64x64.py
@@ -68,7 +68,7 @@ def GeneratorAndDiscriminator():

raise Exception('You must choose an architecture!')

DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]
DEVICES = ['/gpu:{}'.format(i) for i in range(N_GPUS)]

def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
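A side note on the helper just above: `tf.maximum(alpha*x, x)` is a branch-free way to express leaky ReLU, because with `alpha < 1` the maximum returns `x` for non-negative inputs and `alpha*x` for negative ones. A quick numeric check in plain NumPy (illustration only, not repo code):

```python
import numpy as np

def leaky_relu(x, alpha=0.2):
    # Same identity as the TF helper above.
    return np.maximum(alpha * x, x)

print(leaky_relu(np.array([-2.0, -0.5, 0.0, 1.5])))  # [-0.4 -0.1  0.   1.5]
```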
@@ -298,20 +298,20 @@ def ResnetGenerator(n_samples, noise=None, dim=DIM):
output = lib.ops.linear.Linear('Generator.Input', 128, 4*4*8*dim, noise)
output = tf.reshape(output, [-1, 8*dim, 4, 4])

for i in xrange(6):
output = BottleneckResidualBlock('Generator.4x4_{}'.format(i), 8*dim, 8*dim, 3, output, resample=None)
output = BottleneckResidualBlock('Generator.Up1', 8*dim, 4*dim, 3, output, resample='up')
for i in xrange(6):
output = BottleneckResidualBlock('Generator.8x8_{}'.format(i), 4*dim, 4*dim, 3, output, resample=None)
output = BottleneckResidualBlock('Generator.Up2', 4*dim, 2*dim, 3, output, resample='up')
for i in xrange(6):
output = BottleneckResidualBlock('Generator.16x16_{}'.format(i), 2*dim, 2*dim, 3, output, resample=None)
output = BottleneckResidualBlock('Generator.Up3', 2*dim, 1*dim, 3, output, resample='up')
for i in xrange(6):
output = BottleneckResidualBlock('Generator.32x32_{}'.format(i), 1*dim, 1*dim, 3, output, resample=None)
output = BottleneckResidualBlock('Generator.Up4', 1*dim, dim/2, 3, output, resample='up')
for i in xrange(5):
output = BottleneckResidualBlock('Generator.64x64_{}'.format(i), dim/2, dim/2, 3, output, resample=None)
for i in range(6):
output = ResidualBlock('Generator.4x4_{}'.format(i), 8*dim, 8*dim, 3, output, resample=None)
output = ResidualBlock('Generator.Up1', 8*dim, 4*dim, 3, output, resample='up')
for i in range(6):
output = ResidualBlock('Generator.8x8_{}'.format(i), 4*dim, 4*dim, 3, output, resample=None)
output = ResidualBlock('Generator.Up2', 4*dim, 2*dim, 3, output, resample='up')
for i in range(6):
output = ResidualBlock('Generator.16x16_{}'.format(i), 2*dim, 2*dim, 3, output, resample=None)
output = ResidualBlock('Generator.Up3', 2*dim, 1*dim, 3, output, resample='up')
for i in range(6):
output = ResidualBlock('Generator.32x32_{}'.format(i), 1*dim, 1*dim, 3, output, resample=None)
output = ResidualBlock('Generator.Up4', 1*dim, dim/2, 3, output, resample='up')
for i in range(5):
output = ResidualBlock('Generator.64x64_{}'.format(i), dim/2, dim/2, 3, output, resample=None)

output = lib.ops.conv2d.Conv2D('Generator.Out', dim/2, 3, 1, output, he_init=False)
output = tf.tanh(output / 5.)
@@ -396,20 +396,20 @@ def ResnetDiscriminator(inputs, dim=DIM):
output = tf.reshape(inputs, [-1, 3, 64, 64])
output = lib.ops.conv2d.Conv2D('Discriminator.In', 3, dim/2, 1, output, he_init=False)

for i in xrange(5):
output = BottleneckResidualBlock('Discriminator.64x64_{}'.format(i), dim/2, dim/2, 3, output, resample=None)
output = BottleneckResidualBlock('Discriminator.Down1', dim/2, dim*1, 3, output, resample='down')
for i in xrange(6):
output = BottleneckResidualBlock('Discriminator.32x32_{}'.format(i), dim*1, dim*1, 3, output, resample=None)
output = BottleneckResidualBlock('Discriminator.Down2', dim*1, dim*2, 3, output, resample='down')
for i in xrange(6):
output = BottleneckResidualBlock('Discriminator.16x16_{}'.format(i), dim*2, dim*2, 3, output, resample=None)
output = BottleneckResidualBlock('Discriminator.Down3', dim*2, dim*4, 3, output, resample='down')
for i in xrange(6):
output = BottleneckResidualBlock('Discriminator.8x8_{}'.format(i), dim*4, dim*4, 3, output, resample=None)
output = BottleneckResidualBlock('Discriminator.Down4', dim*4, dim*8, 3, output, resample='down')
for i in xrange(6):
output = BottleneckResidualBlock('Discriminator.4x4_{}'.format(i), dim*8, dim*8, 3, output, resample=None)
for i in range(5):
output = ResidualBlock('Discriminator.64x64_{}'.format(i), dim/2, dim/2, 3, output, resample=None)
output = ResidualBlock('Discriminator.Down1', dim/2, dim*1, 3, output, resample='down')
for i in range(6):
output = ResidualBlock('Discriminator.32x32_{}'.format(i), dim*1, dim*1, 3, output, resample=None)
output = ResidualBlock('Discriminator.Down2', dim*1, dim*2, 3, output, resample='down')
for i in range(6):
output = ResidualBlock('Discriminator.16x16_{}'.format(i), dim*2, dim*2, 3, output, resample=None)
output = ResidualBlock('Discriminator.Down3', dim*2, dim*4, 3, output, resample='down')
for i in range(6):
output = ResidualBlock('Discriminator.8x8_{}'.format(i), dim*4, dim*4, 3, output, resample=None)
output = ResidualBlock('Discriminator.Down4', dim*4, dim*8, 3, output, resample='down')
for i in range(6):
output = ResidualBlock('Discriminator.4x4_{}'.format(i), dim*8, dim*8, 3, output, resample=None)

output = tf.reshape(output, [-1, 4*4*8*dim])
output = lib.ops.linear.Linear('Discriminator.Output', 4*4*8*dim, 1, output)
@@ -419,7 +419,7 @@ def ResnetDiscriminator(inputs, dim=DIM):

def FCDiscriminator(inputs, FC_DIM=512, n_layers=3):
output = LeakyReLULayer('Discriminator.Input', OUTPUT_DIM, FC_DIM, inputs)
for i in xrange(n_layers):
for i in range(n_layers):
output = LeakyReLULayer('Discriminator.{}'.format(i), FC_DIM, FC_DIM, output)
output = lib.ops.linear.Linear('Discriminator.Out', FC_DIM, 1, output)

@@ -584,16 +584,16 @@ def inf_train_gen():
yield images

# Save a batch of ground-truth samples
_x = inf_train_gen().next()
_x_r = session.run(real_data, feed_dict={real_data_conv: _x[:BATCH_SIZE/N_GPUS]})
_x = next(inf_train_gen())
_x_r = session.run(real_data, feed_dict={real_data_conv: _x})
_x_r = ((_x_r+1.)*(255.99/2)).astype('int32')
lib.save_images.save_images(_x_r.reshape((BATCH_SIZE/N_GPUS, 3, 64, 64)), 'samples_groundtruth.png')


# Train loop
session.run(tf.initialize_all_variables())
gen = inf_train_gen()
for iteration in xrange(ITERS):
for iteration in range(ITERS):

start_time = time.time()

@@ -606,8 +606,8 @@ def inf_train_gen():
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in xrange(disc_iters):
_data = gen.next()
for i in range(disc_iters):
_data = next(gen)
_disc_cost, _ = session.run([disc_cost, disc_train_op], feed_dict={all_real_data_conv: _data})
if MODE == 'wgan':
_ = session.run([clip_disc_weights])
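The recurring `gen.next()` → `next(gen)` change in this file is the Python 3 generator protocol: the `.next()` method was renamed to `.__next__()`, and the portable spelling on both versions is the built-in `next()`. A minimal standalone sketch (not repo code):

```python
def inf_batches():
    # Toy stand-in for inf_train_gen(): yields an incrementing counter forever.
    i = 0
    while True:
        yield i
        i += 1

gen = inf_batches()
print(next(gen))      # 0 -- works on Python 2 and Python 3
print(next(gen))      # 1
# gen.next()          # Python 2 only; raises AttributeError on Python 3
```

One spot worth double-checking in the same vein: `/` is true division in Python 3, so expressions like the `BATCH_SIZE/N_GPUS` reshape left unchanged above now produce floats (see the short division sketch after the gan_toy.py diff).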
8 changes: 4 additions & 4 deletions gan_cifar.py
@@ -160,7 +160,7 @@ def generate_image(frame, true_dist):
samples_100 = Generator(100)
def get_inception_score():
all_samples = []
for i in xrange(10):
for i in range(10):
all_samples.append(session.run(samples_100))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples+1.)*(255./2)).astype('int32')
@@ -179,7 +179,7 @@ def inf_train_gen():
session.run(tf.initialize_all_variables())
gen = inf_train_gen()

for iteration in xrange(ITERS):
for iteration in range(ITERS):
start_time = time.time()
# Train generator
if iteration > 0:
@@ -189,8 +189,8 @@ def inf_train_gen():
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in xrange(disc_iters):
_data = gen.next()
for i in range(disc_iters):
_data = next(gen)
_disc_cost, _ = session.run([disc_cost, disc_train_op], feed_dict={real_data_int: _data})
if MODE == 'wgan':
_ = session.run(clip_disc_weights)
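A related version note: `tf.initialize_all_variables()`, used above, was deprecated around TensorFlow 0.12/1.0 in favour of `tf.global_variables_initializer()`. If the Windows install pulls a recent 1.x TensorFlow, a guarded form avoids the deprecation warning (illustrative sketch, not repo code):

```python
import tensorflow as tf

# Prefer the newer initializer when available; fall back on very old TF builds.
init_op = (tf.global_variables_initializer()
           if hasattr(tf, "global_variables_initializer")
           else tf.initialize_all_variables())
# session.run(init_op)  # run inside the tf.Session() block as before
```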
30 changes: 15 additions & 15 deletions gan_language.py
@@ -14,7 +14,7 @@

# Download Google Billion Word at http://www.statmt.org/lm-benchmark/ and
# fill in the path to the extracted files here!
DATA_DIR = ''
DATA_DIR = 'data'
if len(DATA_DIR) == 0:
raise Exception('Please specify path to data directory in gan_language.py!')

@@ -118,7 +118,7 @@ def Discriminator(inputs):
def inf_train_gen():
while True:
np.random.shuffle(lines)
for i in xrange(0, len(lines)-BATCH_SIZE+1, BATCH_SIZE):
for i in range(0, len(lines)-BATCH_SIZE+1, BATCH_SIZE):
yield np.array(
[[charmap[c] for c in l] for l in lines[i:i+BATCH_SIZE]],
dtype='int32'
@@ -127,11 +127,11 @@ def inf_train_gen():
# During training we monitor JS divergence between the true & generated ngram
# distributions for n=1,2,3,4. To get an idea of the optimal values, we
# evaluate these statistics on a held-out set first.
true_char_ngram_lms = [language_helpers.NgramLanguageModel(i+1, lines[10*BATCH_SIZE:], tokenize=False) for i in xrange(4)]
validation_char_ngram_lms = [language_helpers.NgramLanguageModel(i+1, lines[:10*BATCH_SIZE], tokenize=False) for i in xrange(4)]
for i in xrange(4):
print "validation set JSD for n={}: {}".format(i+1, true_char_ngram_lms[i].js_with(validation_char_ngram_lms[i]))
true_char_ngram_lms = [language_helpers.NgramLanguageModel(i+1, lines, tokenize=False) for i in xrange(4)]
true_char_ngram_lms = [language_helpers.NgramLanguageModel(i+1, lines[10*BATCH_SIZE:], tokenize=False) for i in range(4)]
validation_char_ngram_lms = [language_helpers.NgramLanguageModel(i+1, lines[:10*BATCH_SIZE], tokenize=False) for i in range(4)]
for i in range(4):
print("validation set JSD for n={}: {}".format(i+1, true_char_ngram_lms[i].js_with(validation_char_ngram_lms[i])))
true_char_ngram_lms = [language_helpers.NgramLanguageModel(i+1, lines, tokenize=False) for i in range(4)]

with tf.Session() as session:

@@ -141,25 +141,25 @@ def generate_samples():
samples = session.run(fake_inputs)
samples = np.argmax(samples, axis=2)
decoded_samples = []
for i in xrange(len(samples)):
for i in range(len(samples)):
decoded = []
for j in xrange(len(samples[i])):
for j in range(len(samples[i])):
decoded.append(inv_charmap[samples[i][j]])
decoded_samples.append(tuple(decoded))
return decoded_samples

gen = inf_train_gen()

for iteration in xrange(ITERS):
for iteration in range(ITERS):
start_time = time.time()

# Train generator
if iteration > 0:
_ = session.run(gen_train_op)

# Train critic
for i in xrange(CRITIC_ITERS):
_data = gen.next()
for i in range(CRITIC_ITERS):
_data = next(gen)
_disc_cost, _ = session.run(
[disc_cost, disc_train_op],
feed_dict={real_inputs_discrete:_data}
@@ -170,14 +170,14 @@ def generate_samples():

if iteration % 100 == 99:
samples = []
for i in xrange(10):
for i in range(10):
samples.extend(generate_samples())

for i in xrange(4):
for i in range(4):
lm = language_helpers.NgramLanguageModel(i+1, samples, tokenize=False)
lib.plot.plot('js{}'.format(i+1), lm.js_with(true_char_ngram_lms[i]))

with open('samples_{}.txt'.format(iteration), 'w') as f:
with open('samples_{}.txt'.format(iteration), 'w', encoding="utf-8") as f:
for s in samples:
s = "".join(s)
f.write(s + "\n")
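For readers unfamiliar with the JS-divergence monitoring described in the comment block above: it compares n-gram distributions of real and generated text, with 0 meaning identical distributions and 1 (after the log-2 normalisation used in `js_with`) meaning disjoint support. A tiny self-contained sketch with character bigrams; this only illustrates the idea and assumes nothing about `NgramLanguageModel`'s internals:

```python
from collections import Counter
import numpy as np

def bigram_dist(lines):
    # Normalised character-bigram counts over a list of strings.
    counts = Counter(g for line in lines for g in zip(line, line[1:]))
    total = sum(counts.values())
    return {g: c / total for g, c in counts.items()}

def js_divergence(p, q):
    keys = sorted(set(p) | set(q))
    p_vec = np.array([p.get(k, 0.0) for k in keys])
    q_vec = np.array([q.get(k, 0.0) for k in keys])
    m = 0.5 * (p_vec + q_vec)
    kl = lambda a, b: np.sum(a[a > 0] * np.log(a[a > 0] / b[a > 0]))
    return 0.5 * (kl(p_vec, m) + kl(q_vec, m)) / np.log(2)  # normalised to [0, 1]

real = ["hello world", "hello there"]
fake = ["hxllo wxrld", "hxllo thxre"]
print(js_divergence(bigram_dist(real), bigram_dist(fake)))  # closer to 0 as fake matches real
```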
6 changes: 3 additions & 3 deletions gan_mnist.py
@@ -211,7 +211,7 @@ def inf_train_gen():

gen = inf_train_gen()

for iteration in xrange(ITERS):
for iteration in range(ITERS):
start_time = time.time()

if iteration > 0:
@@ -221,8 +221,8 @@ def inf_train_gen():
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in xrange(disc_iters):
_data = gen.next()
for i in range(disc_iters):
_data = next(gen)
_disc_cost, _ = session.run(
[disc_cost, disc_train_op],
feed_dict={real_data: _data}
24 changes: 12 additions & 12 deletions gan_toy.py
@@ -132,12 +132,12 @@ def Discriminator(inputs):
)
clip_disc_weights = tf.group(*clip_ops)

print "Generator params:"
print("Generator params:")
for var in lib.params_with_name('Generator'):
print "\t{}\t{}".format(var.name, var.get_shape())
print "Discriminator params:"
print("\t{}\t{}".format(var.name, var.get_shape()))
print("Discriminator params:")
for var in lib.params_with_name('Discriminator'):
print "\t{}\t{}".format(var.name, var.get_shape())
print("\t{}\t{}".format(var.name, var.get_shape()))

frame_index = [0]
def generate_image(true_dist):
@@ -174,9 +174,9 @@ def inf_train_gen():
if DATASET == '25gaussians':

dataset = []
for i in xrange(100000/25):
for x in xrange(-2, 3):
for y in xrange(-2, 3):
for i in range(100000//25):
for x in range(-2, 3):
for y in range(-2, 3):
point = np.random.randn(2)*0.05
point[0] += 2*x
point[1] += 2*y
@@ -185,7 +185,7 @@ def inf_train_gen():
np.random.shuffle(dataset)
dataset /= 2.828 # stdev
while True:
for i in xrange(len(dataset)/BATCH_SIZE):
for i in range(len(dataset)//BATCH_SIZE):
yield dataset[i*BATCH_SIZE:(i+1)*BATCH_SIZE]

elif DATASET == 'swissroll':
@@ -215,7 +215,7 @@ def inf_train_gen():
centers = [(scale*x,scale*y) for x,y in centers]
while True:
dataset = []
for i in xrange(BATCH_SIZE):
for i in range(BATCH_SIZE):
point = np.random.randn(2)*.02
center = random.choice(centers)
point[0] += center[0]
@@ -229,13 +229,13 @@ def inf_train_gen():
with tf.Session() as session:
session.run(tf.initialize_all_variables())
gen = inf_train_gen()
for iteration in xrange(ITERS):
for iteration in range(ITERS):
# Train generator
if iteration > 0:
_ = session.run(gen_train_op)
# Train critic
for i in xrange(CRITIC_ITERS):
_data = gen.next()
for i in range(CRITIC_ITERS):
_data = next(gen)
_disc_cost, _ = session.run(
[disc_cost, disc_train_op],
feed_dict={real_data: _data}
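The `//` in the dataset loops above is load-bearing once this file runs on Python 3: `/` is now true division and returns a float, which `range()` rejects. A two-line illustration:

```python
print(100000 / 25)    # 4000.0 -- float; range(4000.0) raises TypeError on Python 3
print(100000 // 25)   # 4000   -- int; safe for range(), indexing, reshape, etc.
```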
14 changes: 7 additions & 7 deletions language_helpers.py
@@ -24,7 +24,7 @@ def __init__(self, n, samples, tokenize=False):
def ngrams(self):
n = self._n
for sample in self._samples:
for i in xrange(len(sample)-n+1):
for i in range(len(sample)-n+1):
yield sample[i:i+n]

def unique_ngrams(self):
@@ -86,15 +86,15 @@ def js_with(self, p):
return 0.5*(kl_p_m + kl_q_m) / np.log(2)

def load_dataset(max_length, max_n_examples, tokenize=False, max_vocab_size=2048, data_dir='/home/ishaan/data/1-billion-word-language-modeling-benchmark-r13output'):
print "loading dataset..."
print("loading dataset...")

lines = []

finished = False

for i in xrange(99):
for i in range(99):
path = data_dir+("/training-monolingual.tokenized.shuffled/news.en-{}-of-00100".format(str(i+1).zfill(5)))
with open(path, 'r') as f:
with open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line[:-1]
if tokenize:
@@ -136,8 +136,8 @@ def load_dataset(max_length, max_n_examples, tokenize=False, max_vocab_size=2048
filtered_line.append('unk')
filtered_lines.append(tuple(filtered_line))

for i in xrange(100):
print filtered_lines[i]
for i in range(100):
print(filtered_lines[i])

print "loaded {} lines in dataset".format(len(lines))
print("loaded {} lines in dataset".format(len(lines)))
return filtered_lines, charmap, inv_charmap
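A note on the explicit `encoding='utf-8'` added to the `open()` call in `load_dataset`: Python 3 decodes text files with `locale.getpreferredencoding()` by default, which on most Windows installs is cp1252, so reading the UTF-8 Billion Word files without the argument can produce mojibake or a `UnicodeDecodeError`. A minimal illustration (the file name is hypothetical):

```python
# Write a UTF-8 file, then read it back; "sample.txt" is just for illustration.
with open("sample.txt", "w", encoding="utf-8") as f:
    f.write("naïve résumé\n")

with open("sample.txt", encoding="utf-8") as f:   # explicit: decodes correctly on any platform
    print(f.read())

# with open("sample.txt") as f:   # implicit: uses the locale encoding (often cp1252 on Windows),
#     print(f.read())             # which can garble or fail on non-ASCII characters
```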