From cadc3ef2f02f280b2e5261d2ce149ce33c36349a Mon Sep 17 00:00:00 2001 From: Will Breaden Madden Date: Thu, 20 Aug 2015 23:37:07 +0200 Subject: [PATCH] Make whitespace consistent, change string formatting from modulo to format --- dream.ipynb | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/dream.ipynb b/dream.ipynb index 39b5037..e38c7e2 100644 --- a/dream.ipynb +++ b/dream.ipynb @@ -93,7 +93,7 @@ "\n", "net = caffe.Classifier('tmp.prototxt', param_fn,\n", " mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent\n", - " channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB\n", + " channel_swap = (2, 1, 0)) # the reference model has channels in BGR order instead of RGB\n", "\n", "# a couple of utility functions for converting to and from Caffe's input image layout\n", "def preprocess(net, img):\n", @@ -194,7 +194,7 @@ " # prepare base images for all octaves\n", " octaves = [preprocess(net, base_img)]\n", " for i in xrange(octave_n-1):\n", - " octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))\n", + " octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))\n", " \n", " src = net.blobs['data']\n", " detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details\n", @@ -203,7 +203,7 @@ " if octave > 0:\n", " # upscale details from the previous octave\n", " h1, w1 = detail.shape[-2:]\n", - " detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)\n", + " detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)\n", "\n", " src.reshape(1,3,h,w) # resize the network's input image size\n", " src.data[0] = octave_base+detail\n", @@ -416,7 +416,7 @@ "s = 0.05 # scale coefficient\n", "for i in xrange(100):\n", " frame = deepdream(net, frame)\n", - " PIL.Image.fromarray(np.uint8(frame)).save(\"frames/%04d.jpg\"%frame_i)\n", + " 
PIL.Image.fromarray(np.uint8(frame)).save(\"frames/{index:04d}.jpg\".format(index = frame_i))\n", " frame = nd.affine_transform(frame, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)\n", " frame_i += 1" ], @@ -511,7 +511,7 @@ "end = 'inception_3b/output'\n", "h, w = guide.shape[:2]\n", "src, dst = net.blobs['data'], net.blobs[end]\n", - "src.reshape(1,3,h,w)\n", + "src.reshape(1, 3, h, w)\n", "src.data[0] = preprocess(net, guide)\n", "net.forward(end=end)\n", "guide_features = dst.data[0].copy()" ], @@ -536,10 +536,10 @@ " x = dst.data[0].copy()\n", " y = guide_features\n", " ch = x.shape[0]\n", - " x = x.reshape(ch,-1)\n", - " y = y.reshape(ch,-1)\n", + " x = x.reshape(ch, -1)\n", + " y = y.reshape(ch, -1)\n", " A = x.T.dot(y) # compute the matrix of dot-products with guide features\n", - " dst.diff[0].reshape(ch,-1)[:] = y[:,A.argmax(1)] # select ones that match best\n", + " dst.diff[0].reshape(ch, -1)[:] = y[:, A.argmax(1)] # select ones that match best\n", "\n", "_=deepdream(net, img, end=end, objective=objective_guide)" ], @@ -575,4 +575,4 @@ "metadata": {} } ] -} +} \ No newline at end of file