diff --git a/.gitignore b/.gitignore index 977860d6..44740a36 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ #dropbox stuff *.dropbox* +.idea/* # Byte-compiled / optimized / DLL files __pycache__/ @@ -58,3 +59,10 @@ docs/_build/ # PyBuilder target/ + +# Pycharm +.idea/* + + +#Notebook stuff +notebooks/.ipynb_checkpoints/ diff --git a/MLP2022_23_CW2_Spec.pdf b/MLP2022_23_CW2_Spec.pdf new file mode 100644 index 00000000..0fc690e7 Binary files /dev/null and b/MLP2022_23_CW2_Spec.pdf differ diff --git a/README.md b/README.md index e25cd6c9..8870d148 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Machine Learning Practical -This repository contains the code for the University of Edinburgh [School of Informatics](http://www.inf.ed.ac.uk) course [Machine Learning Practical](http://www.inf.ed.ac.uk/teaching/courses/mlp/). +This repository contains the code for the University of Edinburgh [School of Informatics](http://www.inf.ed.ac.uk) course Machine Learning Practical. This assignment-based course is focused on the implementation and evaluation of machine learning systems. Students who do this course will have experience in the design, implementation, training, and evaluation of machine learning systems. @@ -16,3 +16,4 @@ If you are working remotely, follow this [guide](notes/remote-working-guide.md). ## Getting set up Detailed instructions for setting up a development environment for the course are given in [this file](notes/environment-set-up.md). Students doing the course will spend part of the first lab getting their own environment set up. 
+ diff --git a/VGG_08/result_outputs/summary.csv b/VGG_08/result_outputs/summary.csv new file mode 100644 index 00000000..d4c46384 --- /dev/null +++ b/VGG_08/result_outputs/summary.csv @@ -0,0 +1,102 @@ +train_acc,train_loss,val_acc,val_loss +0.010694736842105264,4.827323,0.024800000000000003,4.5659676 +0.03562105263157895,4.3888855,0.0604,4.136276 +0.0757684210526316,3.998175,0.09480000000000001,3.8678854 +0.10734736842105265,3.784943,0.12159999999999999,3.6687074 +0.13741052631578948,3.6023798,0.15439999999999998,3.4829779 +0.16888421052631578,3.4196754,0.1864,3.3093607 +0.1941263157894737,3.2674048,0.20720000000000002,3.2223148 +0.21861052631578948,3.139925,0.22880000000000003,3.1171055 +0.24134736842105264,3.0145736,0.24760000000000001,3.0554724 +0.26399999999999996,2.9004965,0.2552,2.9390912 +0.27898947368421056,2.815607,0.2764,2.9205213 +0.29532631578947366,2.7256868,0.2968,2.7410471 +0.31138947368421044,2.6567938,0.3016,2.7083752 +0.3236842105263158,2.595405,0.322,2.665904 +0.33486315789473686,2.5434496,0.3176,2.688214 +0.3462526315789474,2.5021079,0.33159999999999995,2.648656 +0.35381052631578946,2.4609485,0.342,2.5658453 +0.36157894736842106,2.4152951,0.34119999999999995,2.5403407 +0.36774736842105266,2.382958,0.3332,2.6936982 +0.37753684210526317,2.3510027,0.36160000000000003,2.4663532 +0.38597894736842114,2.319616,0.3608,2.4559999 +0.3912421052631579,2.294115,0.3732,2.3644555 +0.39840000000000003,2.2598042,0.3716,2.4516551 +0.4036,2.2318766,0.37439999999999996,2.4189563 +0.4105263157894737,2.2035582,0.3772,2.3899698 +0.41501052631578944,2.1830406,0.3876,2.3215945 +0.4193263157894737,2.158597,0.37800000000000006,2.3831298 +0.4211578947368421,2.148888,0.38160000000000005,2.3436418 +0.4260842105263159,2.1250536,0.39840000000000003,2.3471045 +0.4313684210526315,2.107519,0.4044,2.2744477 +0.4370526315789474,2.0837262,0.398,2.245617 +0.439642105263158,2.0691078,0.41200000000000003,2.216309 +0.4440842105263158,2.046351,0.4096,2.2329648 
+0.44696842105263157,2.0330904,0.4104,2.1841388 +0.4518105263157895,2.0200553,0.4244,2.1780539 +0.45298947368421055,2.0069249,0.42719999999999997,2.1625984 +0.4602105263157895,1.9896894,0.4204,2.2195568 +0.46023157894736844,1.9788533,0.4244,2.1803434 +0.46101052631578954,1.9693571,0.4128,2.1858895 +0.46774736842105263,1.9547894,0.4204,2.1908271 +0.4671157894736842,1.9390026,0.4244,2.1841395 +0.4698105263157895,1.924038,0.424,2.1843896 +0.4738736842105264,1.9161719,0.43,2.154806 +0.47541052631578945,1.9033127,0.4463999999999999,2.1130056 +0.48,1.8961077,0.44439999999999996,2.113019 +0.48456842105263154,1.8838875,0.43079999999999996,2.1191697 +0.4857263157894737,1.8711865,0.44920000000000004,2.1213412 +0.4887578947368421,1.8590263,0.44799999999999995,2.1077166 +0.49035789473684216,1.8479114,0.4428,2.0737479 +0.4908421052631579,1.845268,0.4436,2.07655 +0.4939368421052632,1.8336699,0.4548,2.0769904 +0.49924210526315793,1.8237538,0.4548,2.061769 +0.49677894736842104,1.8111013,0.44240000000000007,2.0676718 +0.5008842105263157,1.8031327,0.4548,2.0859065 +0.5,1.8026625,0.458,2.0704215 +0.5030736842105263,1.792004,0.4596,2.1113508 +0.505578947368421,1.7810374,0.45679999999999993,2.0382714 +0.5090315789473684,1.7691813,0.4444000000000001,2.0911386 +0.512042105263158,1.7633294,0.4616,2.0458508 +0.5142736842105263,1.7549652,0.4464,2.0786576 +0.5128421052631579,1.7518128,0.4656,2.026332 +0.518042105263158,1.7420768,0.46,2.0141299 +0.5182315789473684,1.7321203,0.45960000000000006,2.0226884 +0.5192842105263158,1.7264535,0.46279999999999993,2.0182638 +0.5217894736842105,1.7245325,0.46399999999999997,2.0110855 +0.5229684210526316,1.7184331,0.46679999999999994,2.0191038 +0.5227578947368421,1.7116771,0.4604,2.0334535 +0.5245894736842105,1.7009526,0.4692,2.0072439 +0.5262315789473684,1.6991171,0.4700000000000001,2.0296187 +0.5278526315789474,1.6958193,0.4708,1.9912667 +0.527157894736842,1.6907407,0.4736,2.006095 +0.5299578947368421,1.6808176,0.4715999999999999,2.012164 
+0.5313052631578947,1.676356,0.47239999999999993,1.9955354 +0.5338315789473685,1.6731659,0.47839999999999994,2.005768 +0.5336000000000001,1.662152,0.4672,2.015392 +0.5354736842105263,1.6638054,0.4692,1.9890119 +0.5397894736842105,1.6575475,0.4768,2.0090258 +0.5386526315789474,1.6595734,0.4824,1.9728817 +0.5376631578947368,1.6536722,0.4816,1.9769167 +0.5384842105263159,1.6495628,0.47600000000000003,1.9980135 +0.5380842105263157,1.6488388,0.478,1.9884782 +0.5393473684210528,1.6408547,0.48,1.9772192 +0.5415157894736843,1.632917,0.4828,1.9732709 +0.5394947368421052,1.6340653,0.4776,1.9623082 +0.5429052631578948,1.6340532,0.47759999999999997,1.9812362 +0.5452421052631579,1.6246406,0.48119999999999996,1.9846246 +0.5436210526315789,1.6288266,0.4864,1.9822198 +0.5437684210526316,1.6240481,0.48279999999999995,1.9768158 +0.546357894736842,1.6208181,0.4804,1.9625885 +0.5485052631578946,1.6164333,0.47839999999999994,1.9738724 +0.5466736842105263,1.6169226,0.47800000000000004,1.9842362 +0.547621052631579,1.6159856,0.4828,1.9709526 +0.5480421052631579,1.6175526,0.48560000000000003,1.967775 +0.5468421052631579,1.6149833,0.48119999999999996,1.9626708 +0.5493894736842105,1.6063902,0.4835999999999999,1.96621 +0.5490736842105263,1.6096952,0.48120000000000007,1.9742922 +0.5514736842105264,1.6084315,0.4867999999999999,1.9604725 +0.5489263157894737,1.6069487,0.4831999999999999,1.9733659 +0.5494947368421053,1.6030664,0.49079999999999996,1.9693874 +0.5516842105263158,1.6043342,0.486,1.9647765 +0.552442105263158,1.6039867,0.48480000000000006,1.9649359 diff --git a/VGG_08/result_outputs/test_summary.csv b/VGG_08/result_outputs/test_summary.csv new file mode 100644 index 00000000..f19765f0 --- /dev/null +++ b/VGG_08/result_outputs/test_summary.csv @@ -0,0 +1,2 @@ +test_acc,test_loss +0.49950000000000006,1.9105633 diff --git a/VGG_38/result_outputs/summary.csv b/VGG_38/result_outputs/summary.csv new file mode 100644 index 00000000..e5a38853 --- /dev/null +++ 
b/VGG_38/result_outputs/summary.csv @@ -0,0 +1,101 @@ +train_acc,train_loss,val_acc,val_loss +0.009263157894736843,4.8649125,0.0104,4.630689 +0.009810526315789474,4.6264124,0.009600000000000001,4.618983 +0.009705263157894738,4.621914,0.011200000000000002,4.6184525 +0.008989473684210525,4.619472,0.0064,4.6164784 +0.009747368421052633,4.6168556,0.0076,4.6138463 +0.00951578947368421,4.6156826,0.0108,4.6139345 +0.009789473684210525,4.614809,0.008400000000000001,4.6116896 +0.009936842105263159,4.613147,0.0104,4.6148276 +0.009810526315789474,4.612325,0.0076,4.6123877 +0.009094736842105263,4.6117926,0.007200000000000001,4.6149993 +0.008421052631578947,4.611283,0.011600000000000001,4.6114736 +0.009010526315789472,4.6105323,0.009600000000000001,4.607559 +0.009894736842105263,4.6103206,0.008400000000000001,4.6086206 +0.00934736842105263,4.6095214,0.011200000000000002,4.6091933 +0.009473684210526316,4.6095295,0.008,4.6095695 +0.010252631578947369,4.609189,0.0104,4.610459 +0.009536842105263158,4.6087623,0.0092,4.6091356 +0.00848421052631579,4.6086617,0.009600000000000001,4.609126 +0.008421052631578947,4.6083455,0.011200000000000002,4.6088147 +0.009410526315789473,4.608145,0.0068000000000000005,4.608519 +0.009263157894736843,4.6078997,0.0092,4.6085033 +0.009389473684210526,4.607453,0.01,4.6083508 +0.008989473684210528,4.6075597,0.008400000000000001,4.6073136 +0.009326315789473686,4.607266,0.008,4.6069093 +0.01,4.607154,0.0076,4.6069508 +0.008778947368421053,4.607089,0.011200000000000002,4.60659 +0.009326315789473684,4.606807,0.0068,4.6072598 +0.009031578947368422,4.6068263,0.011200000000000002,4.607257 +0.008842105263157896,4.6066294,0.008,4.606883 +0.008968421052631579,4.606647,0.006400000000000001,4.607275 +0.008947368421052631,4.6065364,0.0092,4.606976 +0.008842105263157896,4.6064167,0.0076,4.607016 +0.008799999999999999,4.606425,0.0096,4.607184 +0.009326315789473686,4.606305,0.0072,4.6068683 +0.00905263157894737,4.606274,0.0072,4.606982 
+0.00934736842105263,4.6062336,0.007200000000000001,4.607209 +0.009221052631578948,4.606221,0.0076,4.607369 +0.009557894736842105,4.60607,0.0076,4.6074376 +0.009073684210526317,4.6061006,0.0072,4.607068 +0.009242105263157895,4.606005,0.0064,4.6067224 +0.009957894736842107,4.605986,0.0072,4.6068263 +0.009052631578947368,4.605935,0.0072,4.6067867 +0.008694736842105264,4.6059127,0.0064,4.6070905 +0.009536842105263158,4.605874,0.006400000000000001,4.606976 +0.009663157894736842,4.605872,0.0072,4.6068897 +0.008821052631578948,4.6057997,0.0064,4.607028 +0.009768421052631579,4.605778,0.0072,4.6069264 +0.0092,4.6057644,0.007200000000000001,4.607018 +0.008926315789473685,4.6057386,0.0072,4.60698 +0.008989473684210525,4.6057277,0.0064,4.6070237 +0.009242105263157895,4.6057053,0.0064,4.6069183 +0.009094736842105263,4.605692,0.006400000000000001,4.6068764 +0.009473684210526316,4.60566,0.0064,4.606909 +0.009494736842105262,4.605613,0.0064,4.606978 +0.009747368421052631,4.6056285,0.0064,4.606753 +0.009789473684210527,4.605578,0.006400000000000001,4.6068797 +0.009199999999999998,4.6055675,0.0064,4.606888 +0.009073684210526317,4.6055593,0.0064,4.606874 +0.008821052631578948,4.6055293,0.006400000000000001,4.606851 +0.009326315789473684,4.6055255,0.0064,4.606871 +0.009557894736842105,4.6055083,0.006400000000000001,4.606851 +0.009600000000000001,4.605491,0.0064,4.6068635 +0.00856842105263158,4.605466,0.0064,4.606862 +0.009894736842105263,4.605463,0.006400000000000001,4.6068873 +0.009494736842105262,4.605441,0.0064,4.6068926 +0.008673684210526314,4.6054277,0.0064,4.6068554 +0.009221052631578948,4.6054296,0.0063999999999999994,4.6068907 +0.008989473684210528,4.605404,0.0064,4.6068807 +0.00928421052631579,4.6053905,0.006400000000000001,4.6068707 +0.0092,4.6053743,0.0064,4.606894 +0.008989473684210525,4.605368,0.0064,4.606845 +0.009515789473684212,4.605355,0.0064,4.6068635 +0.009073684210526317,4.605352,0.0064,4.6068773 +0.009642105263157895,4.6053243,0.0064,4.606883 
+0.009747368421052633,4.6053176,0.0064,4.6069 +0.009873684210526316,4.6053023,0.0064,4.6068873 +0.009536842105263156,4.605297,0.0064,4.6068654 +0.009515789473684212,4.6052866,0.0064,4.6068883 +0.009978947368421053,4.605265,0.006400000000000001,4.606894 +0.009957894736842107,4.605259,0.0064,4.6068826 +0.009410526315789475,4.6052504,0.0064,4.6068697 +0.01002105263157895,4.6052403,0.006400000000000001,4.6068807 +0.01002105263157895,4.6052313,0.0064,4.606872 +0.00951578947368421,4.605224,0.0064,4.6068883 +0.009852631578947368,4.605219,0.006400000000000001,4.606871 +0.009894736842105265,4.605209,0.0064,4.606871 +0.00922105263157895,4.605204,0.0064,4.6068654 +0.010042105263157896,4.605193,0.0064,4.6068764 +0.009978947368421053,4.6051874,0.006400000000000001,4.6068697 +0.009747368421052633,4.605183,0.0064,4.6068673 +0.010189473684210526,4.605178,0.0064,4.606873 +0.009789473684210527,4.605173,0.0064,4.6068773 +0.009936842105263159,4.605169,0.0064,4.606874 +0.010042105263157894,4.605166,0.0064,4.606877 +0.009494736842105262,4.6051593,0.0064,4.606874 +0.009536842105263158,4.6051593,0.0063999999999999994,4.606874 +0.010021052631578946,4.6051564,0.006400000000000001,4.6068716 +0.009747368421052631,4.605154,0.0064,4.6068726 +0.009642105263157895,4.605153,0.0064,4.606872 +0.009305263157894737,4.6051517,0.0064,4.6068726 diff --git a/VGG_38/result_outputs/test_summary.csv b/VGG_38/result_outputs/test_summary.csv new file mode 100644 index 00000000..bf44c98a --- /dev/null +++ b/VGG_38/result_outputs/test_summary.csv @@ -0,0 +1,2 @@ +test_acc,test_loss +0.01,4.608619 diff --git a/data/ccpp_data.npz b/data/ccpp_data.npz new file mode 100644 index 00000000..a507ba23 Binary files /dev/null and b/data/ccpp_data.npz differ diff --git a/data/emnist-test.npz b/data/emnist-test.npz new file mode 100644 index 00000000..05df1d80 Binary files /dev/null and b/data/emnist-test.npz differ diff --git a/data/emnist-train.npz b/data/emnist-train.npz new file mode 100644 index 00000000..177a30cd 
Binary files /dev/null and b/data/emnist-train.npz differ diff --git a/data/emnist-valid.npz b/data/emnist-valid.npz new file mode 100644 index 00000000..87183dd9 Binary files /dev/null and b/data/emnist-valid.npz differ diff --git a/install.sh b/install.sh new file mode 100644 index 00000000..c43e5e95 --- /dev/null +++ b/install.sh @@ -0,0 +1,2 @@ +conda install tqdm +conda install pytorch torchvision cudatoolkit=10.1 -c pytorch \ No newline at end of file diff --git a/mlp/__init__.py b/mlp/__init__.py index b41e6673..73c9478f 100644 --- a/mlp/__init__.py +++ b/mlp/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Machine Learning Practical package.""" -__authors__ = ['Pawel Swietojanski', 'Steve Renals', 'Matt Graham'] +__authors__ = ['Pawel Swietojanski', 'Steve Renals', 'Matt Graham', 'Antreas Antoniou'] DEFAULT_SEED = 123456 # Default random number generator seed if none provided. diff --git a/mlp/data_providers.py b/mlp/data_providers.py index cd486a52..a4c72a00 100644 --- a/mlp/data_providers.py +++ b/mlp/data_providers.py @@ -7,8 +7,17 @@ import pickle import gzip +import sys + import numpy as np import os + +from PIL import Image +from torch.utils import data +from torch.utils.data import Dataset +from torchvision import transforms +from torchvision.datasets.utils import download_url, check_integrity + from mlp import DEFAULT_SEED @@ -35,23 +44,54 @@ def __init__(self, inputs, targets, batch_size, max_num_batches=-1, """ self.inputs = inputs self.targets = targets - self.batch_size = batch_size - assert max_num_batches != 0 and not max_num_batches < -1, ( - 'max_num_batches should be -1 or > 0') - self.max_num_batches = max_num_batches + if batch_size < 1: + raise ValueError('batch_size must be >= 1') + self._batch_size = batch_size + if max_num_batches == 0 or max_num_batches < -1: + raise ValueError('max_num_batches must be -1 or > 0') + self._max_num_batches = max_num_batches + self._update_num_batches() + self.shuffle_order = shuffle_order + 
self._current_order = np.arange(inputs.shape[0]) + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + self.new_epoch() + + @property + def batch_size(self): + """Number of data points to include in each batch.""" + return self._batch_size + + @batch_size.setter + def batch_size(self, value): + if value < 1: + raise ValueError('batch_size must be >= 1') + self._batch_size = value + self._update_num_batches() + + @property + def max_num_batches(self): + """Maximum number of batches to iterate over in an epoch.""" + return self._max_num_batches + + @max_num_batches.setter + def max_num_batches(self, value): + if value == 0 or value < -1: + raise ValueError('max_num_batches must be -1 or > 0') + self._max_num_batches = value + self._update_num_batches() + + def _update_num_batches(self): + """Updates number of batches to iterate over.""" # maximum possible number of batches is equal to number of whole times # batch_size divides in to the number of data points which can be # found using integer division - possible_num_batches = self.inputs.shape[0] // batch_size + possible_num_batches = self.inputs.shape[0] // self.batch_size if self.max_num_batches == -1: self.num_batches = possible_num_batches else: self.num_batches = min(self.max_num_batches, possible_num_batches) - self.shuffle_order = shuffle_order - if rng is None: - rng = np.random.RandomState(DEFAULT_SEED) - self.rng = rng - self.reset() def __iter__(self): """Implements Python iterator interface. 
@@ -63,24 +103,36 @@ def __iter__(self): """ return self - def reset(self): - """Resets the provider to the initial state to use in a new epoch.""" + def new_epoch(self): + """Starts a new epoch (pass through data), possibly shuffling first.""" self._curr_batch = 0 if self.shuffle_order: self.shuffle() + def __next__(self): + return self.next() + + def reset(self): + """Resets the provider to the initial state.""" + inv_perm = np.argsort(self._current_order) + self._current_order = self._current_order[inv_perm] + self.inputs = self.inputs[inv_perm] + self.targets = self.targets[inv_perm] + self.new_epoch() + def shuffle(self): """Randomly shuffles order of data.""" - new_order = self.rng.permutation(self.inputs.shape[0]) - self.inputs = self.inputs[new_order] - self.targets = self.targets[new_order] + perm = self.rng.permutation(self.inputs.shape[0]) + self._current_order = self._current_order[perm] + self.inputs = self.inputs[perm] + self.targets = self.targets[perm] def next(self): """Returns next data batch or raises `StopIteration` if at end.""" if self._curr_batch + 1 > self.num_batches: - # no more batches in current iteration through data set so reset - # the dataset for another pass and indicate iteration is at end - self.reset() + # no more batches in current iteration through data set so start + # new epoch ready for another pass and indicate iteration is at end + self.new_epoch() raise StopIteration() # create an index slice corresponding to current batch number batch_slice = slice(self._curr_batch * self.batch_size, @@ -90,7 +142,6 @@ def next(self): self._curr_batch += 1 return inputs_batch, targets_batch - class MNISTDataProvider(DataProvider): """Data provider for MNIST handwritten digit images.""" @@ -111,7 +162,7 @@ def __init__(self, which_set='train', batch_size=100, max_num_batches=-1, rng (RandomState): A seeded random number generator. 
""" # check a valid which_set was provided - assert which_set in ['train', 'valid', 'eval'], ( + assert which_set in ['train', 'valid', 'test'], ( 'Expected which_set to be either train, valid or eval. ' 'Got {0}'.format(which_set) ) @@ -133,13 +184,10 @@ def __init__(self, which_set='train', batch_size=100, max_num_batches=-1, super(MNISTDataProvider, self).__init__( inputs, targets, batch_size, max_num_batches, shuffle_order, rng) - # def next(self): - # """Returns next data batch or raises `StopIteration` if at end.""" - # inputs_batch, targets_batch = super(MNISTDataProvider, self).next() - # return inputs_batch, self.to_one_of_k(targets_batch) - # - def __next__(self): - return self.next() + def next(self): + """Returns next data batch or raises `StopIteration` if at end.""" + inputs_batch, targets_batch = super(MNISTDataProvider, self).next() + return inputs_batch, self.to_one_of_k(targets_batch) def to_one_of_k(self, int_targets): """Converts integer coded class target to 1 of K coded targets. @@ -156,15 +204,89 @@ def to_one_of_k(self, int_targets): to zero except for the column corresponding to the correct class which is equal to one. """ - raise NotImplementedError() + one_of_k_targets = np.zeros((int_targets.shape[0], self.num_classes)) + one_of_k_targets[range(int_targets.shape[0]), int_targets] = 1 + return one_of_k_targets +class EMNISTDataProvider(DataProvider): + """Data provider for EMNIST handwritten digit images.""" + + def __init__(self, which_set='train', batch_size=100, max_num_batches=-1, + shuffle_order=True, rng=None, flatten=False): + """Create a new EMNIST data provider object. + + Args: + which_set: One of 'train', 'valid' or 'eval'. Determines which + portion of the EMNIST data this object should provide. + batch_size (int): Number of data points to include in each batch. + max_num_batches (int): Maximum number of batches to iterate over + in an epoch. 
If `max_num_batches * batch_size > num_data` then + only as many batches as the data can be split into will be + used. If set to -1 all of the data will be used. + shuffle_order (bool): Whether to randomly permute the order of + the data before each epoch. + rng (RandomState): A seeded random number generator. + """ + # check a valid which_set was provided + assert which_set in ['train', 'valid', 'test'], ( + 'Expected which_set to be either train, valid or eval. ' + 'Got {0}'.format(which_set) + ) + self.which_set = which_set + self.num_classes = 47 + # construct path to data using os.path.join to ensure the correct path + # separator for the current platform / OS is used + # MLP_DATA_DIR environment variable should point to the data directory + data_path = os.path.join( + os.environ['MLP_DATA_DIR'], 'emnist-{0}.npz'.format(which_set)) + assert os.path.isfile(data_path), ( + 'Data file does not exist at expected path: ' + data_path + ) + # load data from compressed numpy file + loaded = np.load(data_path) + print(loaded.keys()) + inputs, targets = loaded['inputs'], loaded['targets'] + inputs = inputs.astype(np.float32) + targets = targets.astype(np.int) + if flatten: + inputs = np.reshape(inputs, newshape=(-1, 28*28)) + else: + inputs = np.reshape(inputs, newshape=(-1, 28, 28, 1)) + inputs = inputs / 255.0 + # pass the loaded data to the parent class __init__ + super(EMNISTDataProvider, self).__init__( + inputs, targets, batch_size, max_num_batches, shuffle_order, rng) + + def next(self): + """Returns next data batch or raises `StopIteration` if at end.""" + inputs_batch, targets_batch = super(EMNISTDataProvider, self).next() + return inputs_batch, self.to_one_of_k(targets_batch) + + def to_one_of_k(self, int_targets): + """Converts integer coded class target to 1 of K coded targets. + + Args: + int_targets (ndarray): Array of integer coded class targets (i.e. + where an integer from 0 to `num_classes` - 1 is used to + indicate which is the correct class). 
This should be of shape + (num_data,). + + Returns: + Array of 1 of K coded targets i.e. an array of shape + (num_data, num_classes) where for each row all elements are equal + to zero except for the column corresponding to the correct class + which is equal to one. + """ + one_of_k_targets = np.zeros((int_targets.shape[0], self.num_classes)) + one_of_k_targets[range(int_targets.shape[0]), int_targets] = 1 + return one_of_k_targets class MetOfficeDataProvider(DataProvider): """South Scotland Met Office weather data provider.""" def __init__(self, window_size, batch_size=10, max_num_batches=-1, shuffle_order=True, rng=None): - """Create a new Met Offfice data provider object. + """Create a new Met Office data provider object. Args: window_size (int): Size of windows to split weather time series @@ -180,27 +302,445 @@ def __init__(self, window_size, batch_size=10, max_num_batches=-1, the data before each epoch. rng (RandomState): A seeded random number generator. """ - self.window_size = window_size - assert window_size > 1, 'window_size must be at least 2.' data_path = os.path.join( os.environ['MLP_DATA_DIR'], 'HadSSP_daily_qc.txt') assert os.path.isfile(data_path), ( 'Data file does not exist at expected path: ' + data_path ) - # load raw data from text file - # ... + raw = np.loadtxt(data_path, skiprows=3, usecols=range(2, 32)) + assert window_size > 1, 'window_size must be at least 2.' + self.window_size = window_size # filter out all missing datapoints and flatten to a vector - # ... + filtered = raw[raw >= 0].flatten() # normalise data to zero mean, unit standard deviation - # ... - # convert from flat sequence to windowed data - # ... 
+ mean = np.mean(filtered) + std = np.std(filtered) + normalised = (filtered - mean) / std + # create a view on to array corresponding to a rolling window + shape = (normalised.shape[-1] - self.window_size + 1, self.window_size) + strides = normalised.strides + (normalised.strides[-1],) + windowed = np.lib.stride_tricks.as_strided( + normalised, shape=shape, strides=strides) # inputs are first (window_size - 1) entries in windows - # inputs = ... + inputs = windowed[:, :-1] # targets are last entry in windows - # targets = ... - # initialise base class with inputs and targets arrays - # super(MetOfficeDataProvider, self).__init__( - # inputs, targets, batch_size, max_num_batches, shuffle_order, rng) - def __next__(self): - return self.next() \ No newline at end of file + targets = windowed[:, -1] + super(MetOfficeDataProvider, self).__init__( + inputs, targets, batch_size, max_num_batches, shuffle_order, rng) + +class CCPPDataProvider(DataProvider): + + def __init__(self, which_set='train', input_dims=None, batch_size=10, + max_num_batches=-1, shuffle_order=True, rng=None): + """Create a new Combined Cycle Power Plant data provider object. + + Args: + which_set: One of 'train' or 'valid'. Determines which portion of + data this object should provide. + input_dims: Which of the four input dimension to use. If `None` all + are used. If an iterable of integers are provided (consisting + of a subset of {0, 1, 2, 3}) then only the corresponding + input dimensions are included. + batch_size (int): Number of data points to include in each batch. + max_num_batches (int): Maximum number of batches to iterate over + in an epoch. If `max_num_batches * batch_size > num_data` then + only as many batches as the data can be split into will be + used. If set to -1 all of the data will be used. + shuffle_order (bool): Whether to randomly permute the order of + the data before each epoch. + rng (RandomState): A seeded random number generator. 
+ """ + data_path = os.path.join( + os.environ['MLP_DATA_DIR'], 'ccpp_data.npz') + assert os.path.isfile(data_path), ( + 'Data file does not exist at expected path: ' + data_path + ) + # check a valid which_set was provided + assert which_set in ['train', 'valid'], ( + 'Expected which_set to be either train or valid ' + 'Got {0}'.format(which_set) + ) + # check input_dims are valid + if not input_dims is not None: + input_dims = set(input_dims) + assert input_dims.issubset({0, 1, 2, 3}), ( + 'input_dims should be a subset of {0, 1, 2, 3}' + ) + loaded = np.load(data_path) + inputs = loaded[which_set + '_inputs'] + if input_dims is not None: + inputs = inputs[:, input_dims] + targets = loaded[which_set + '_targets'] + super(CCPPDataProvider, self).__init__( + inputs, targets, batch_size, max_num_batches, shuffle_order, rng) + +class EMNISTPytorchDataProvider(Dataset): + def __init__(self, which_set='train', batch_size=100, max_num_batches=-1, + shuffle_order=True, rng=None, flatten=False, transforms=None): + self.numpy_data_provider = EMNISTDataProvider(which_set=which_set, batch_size=batch_size, max_num_batches=max_num_batches, + shuffle_order=shuffle_order, rng=rng, flatten=flatten) + self.transforms = transforms + + def __getitem__(self, item): + x = self.numpy_data_provider.inputs[item] + for augmentation in self.transforms: + x = augmentation(x) + return x, int(self.numpy_data_provider.targets[item]) + + def __len__(self): + return len(self.numpy_data_provider.targets) + +class AugmentedMNISTDataProvider(MNISTDataProvider): + """Data provider for MNIST dataset which randomly transforms images.""" + + def __init__(self, which_set='train', batch_size=100, max_num_batches=-1, + shuffle_order=True, rng=None, transformer=None): + """Create a new augmented MNIST data provider object. + + Args: + which_set: One of 'train', 'valid' or 'test'. Determines which + portion of the MNIST data this object should provide. 
+            batch_size (int): Number of data points to include in each batch.
+            max_num_batches (int): Maximum number of batches to iterate over
+                in an epoch. If `max_num_batches * batch_size > num_data` then
+                only as many batches as the data can be split into will be
+                used. If set to -1 all of the data will be used.
+            shuffle_order (bool): Whether to randomly permute the order of
+                the data before each epoch.
+            rng (RandomState): A seeded random number generator.
+            transformer: Function which takes an `inputs` array of shape
+                (batch_size, input_dim) corresponding to a batch of input
+                images and a `rng` random number generator object (i.e. a
+                call signature `transformer(inputs, rng)`) and applies a
+                potentially random set of transformations to some / all of the
+                input images as each new batch is returned when iterating over
+                the data provider.
+        """
+        super(AugmentedMNISTDataProvider, self).__init__(
+            which_set, batch_size, max_num_batches, shuffle_order, rng)
+        self.transformer = transformer
+
+    def next(self):
+        """Returns next data batch or raises `StopIteration` if at end."""
+        inputs_batch, targets_batch = super(
+            AugmentedMNISTDataProvider, self).next()
+        transformed_inputs_batch = self.transformer(inputs_batch, self.rng)
+        return transformed_inputs_batch, targets_batch
+
+class Omniglot(data.Dataset):
+    """`Omniglot `_ Dataset.
+    Args:
+        root (string): Root directory of dataset where directory
+            ``omniglot_dataset`` exists or will be saved to if download is set to True.
+        train (bool, optional): If True, creates dataset from training set, otherwise
+            creates from test set.
+        transform (callable, optional): A function/transform that takes in an PIL image
+            and returns a transformed version. E.g, ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+        download (bool, optional): If true, downloads the dataset from the internet and
+            puts it in root directory. If dataset is already downloaded, it is not
+            downloaded again.
+    """
+    def collect_data_paths(self, root):
+        data_dict = dict()
+        print(root)
+        for subdir, dir, files in os.walk(root):
+            for file in files:
+                if file.endswith('.png'):
+                    filepath = os.path.join(subdir, file)
+                    class_label = '_'.join(subdir.split("/")[-2:])
+                    if class_label in data_dict:
+                        data_dict[class_label].append(filepath)
+                    else:
+                        data_dict[class_label] = [filepath]
+
+        return data_dict
+
+    def __init__(self, root, set_name,
+                 transform=None, target_transform=None,
+                 download=False):
+        self.root = os.path.expanduser(root)
+        self.root = os.path.abspath(os.path.join(self.root, 'omniglot_dataset'))
+        self.transform = transform
+        self.target_transform = target_transform
+        self.set_name = set_name  # training set or test set
+        self.data_dict = self.collect_data_paths(root=self.root)
+
+        x = []
+        label_to_idx = {label: idx for idx, label in enumerate(self.data_dict.keys())}
+        y = []
+
+        for key, value in self.data_dict.items():
+            x.extend(value)
+            y.extend(len(value) * [label_to_idx[key]])
+
+        y = np.array(y)
+
+
+        rng = np.random.RandomState(seed=0)
+
+        idx = np.arange(len(x))
+        rng.shuffle(idx)
+
+        x = [x[current_idx] for current_idx in idx]
+        y = y[idx]
+
+        train_sample_idx = rng.choice(a=[i for i in range(len(x))], size=int(len(x) * 0.80), replace=False)
+        evaluation_sample_idx = [i for i in range(len(x)) if i not in train_sample_idx]
+        validation_sample_idx = rng.choice(a=evaluation_sample_idx, size=int(len(evaluation_sample_idx) * 0.40), replace=False)
+        test_sample_idx = [i for i in evaluation_sample_idx if i not in validation_sample_idx]
+
+        if self.set_name == 'train':
+            self.data = [item for idx, item in enumerate(x) if idx in train_sample_idx]
+            self.labels = y[train_sample_idx]
+
+        elif self.set_name == 'val':
+            self.data = [item for idx, item in enumerate(x) if idx in validation_sample_idx]
+            self.labels = y[validation_sample_idx]
+
+        else:
+            self.data = [item for idx, item in enumerate(x) if idx in test_sample_idx]
+            self.labels = y[test_sample_idx]
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+        Returns:
+            tuple: (image, target) where target is index of the target class.
+        """
+        img, target = self.data[index], self.labels[index]
+
+        img = Image.open(img)
+        # img.show()  # debug leftover: opened a viewer window for every sample
+
+        if self.transform is not None:
+            img = self.transform(img)
+
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return img, target
+
+    def __len__(self):
+        return len(self.data)
+
+
+    def __repr__(self):
+        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
+        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
+        tmp = self.set_name
+        fmt_str += '    Split: {}\n'.format(tmp)
+        fmt_str += '    Root Location: {}\n'.format(self.root)
+        tmp = '    Transforms (if any): '
+        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
+        tmp = '    Target Transforms (if any): '
+        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
+        return fmt_str
+
+class CIFAR10(data.Dataset):
+    """`CIFAR10 `_ Dataset.
+    Args:
+        root (string): Root directory of dataset where directory
+            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
+        train (bool, optional): If True, creates dataset from training set, otherwise
+            creates from test set.
+        transform (callable, optional): A function/transform that takes in an PIL image
+            and returns a transformed version. E.g, ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+        download (bool, optional): If true, downloads the dataset from the internet and
+            puts it in root directory. If dataset is already downloaded, it is not
+            downloaded again.
+ """ + base_folder = 'cifar-10-batches-py' + url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" + filename = "cifar-10-python.tar.gz" + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + + def __init__(self, root, set_name, + transform=None, target_transform=None, + download=False): + self.root = os.path.expanduser(root) + self.transform = transform + self.target_transform = target_transform + self.set_name = set_name # training set or test set + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + # now load the picked numpy arrays + rng = np.random.RandomState(seed=0) + + train_sample_idx = rng.choice(a=[i for i in range(50000)], size=47500, replace=False) + val_sample_idx = [i for i in range(50000) if i not in train_sample_idx] + + if self.set_name is 'train': + self.data = [] + self.labels = [] + for fentry in self.train_list: + f = fentry[0] + file = os.path.join(self.root, self.base_folder, f) + fo = open(file, 'rb') + if sys.version_info[0] == 2: + entry = pickle.load(fo) + else: + entry = pickle.load(fo, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.labels += entry['labels'] + else: + self.labels += entry['fine_labels'] + fo.close() + + self.data = np.concatenate(self.data) + + self.data = self.data.reshape((50000, 3, 32, 32)) + self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC + self.data = self.data[train_sample_idx] + self.labels = np.array(self.labels)[train_sample_idx] + print(set_name, 
self.data.shape) + print(set_name, self.labels.shape) + + elif self.set_name is 'val': + self.data = [] + self.labels = [] + for fentry in self.train_list: + f = fentry[0] + file = os.path.join(self.root, self.base_folder, f) + fo = open(file, 'rb') + if sys.version_info[0] == 2: + entry = pickle.load(fo) + else: + entry = pickle.load(fo, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.labels += entry['labels'] + else: + self.labels += entry['fine_labels'] + fo.close() + + self.data = np.concatenate(self.data) + self.data = self.data.reshape((50000, 3, 32, 32)) + self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC + self.data = self.data[val_sample_idx] + self.labels = np.array(self.labels)[val_sample_idx] + print(set_name, self.data.shape) + print(set_name, self.labels.shape) + + else: + f = self.test_list[0][0] + file = os.path.join(self.root, self.base_folder, f) + fo = open(file, 'rb') + if sys.version_info[0] == 2: + entry = pickle.load(fo) + else: + entry = pickle.load(fo, encoding='latin1') + self.data = entry['data'] + if 'labels' in entry: + self.labels = entry['labels'] + else: + self.labels = entry['fine_labels'] + fo.close() + self.data = self.data.reshape((10000, 3, 32, 32)) + self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC + self.labels = np.array(self.labels) + print(set_name, self.data.shape) + print(set_name, self.labels.shape) + + def __getitem__(self, index): + """ + Args: + index (int): Index + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img, target = self.data[index], self.labels[index] + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + + img = Image.fromarray(img) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self): + return len(self.data) + + def _check_integrity(self): + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self): + import tarfile + + if self._check_integrity(): + print('Files already downloaded and verified') + return + + root = self.root + download_url(self.url, root, self.filename, self.tgz_md5) + + # extract file + cwd = os.getcwd() + tar = tarfile.open(os.path.join(root, self.filename), "r:gz") + os.chdir(root) + tar.extractall() + tar.close() + os.chdir(cwd) + + def __repr__(self): + fmt_str = 'Dataset ' + self.__class__.__name__ + '\n' + fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) + tmp = self.set_name + fmt_str += ' Split: {}\n'.format(tmp) + fmt_str += ' Root Location: {}\n'.format(self.root) + tmp = ' Transforms (if any): ' + fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) + tmp = ' Target Transforms (if any): ' + fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) + return fmt_str + + +class CIFAR100(CIFAR10): + """`CIFAR100 `_ Dataset. + This is a subclass of the `CIFAR10` Dataset. 
+ """ + base_folder = 'cifar-100-python' + url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" + filename = "cifar-100-python.tar.gz" + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] \ No newline at end of file diff --git a/mlp/errors.py b/mlp/errors.py new file mode 100644 index 00000000..3f0ae4f7 --- /dev/null +++ b/mlp/errors.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +"""Error functions. + +This module defines error functions, with the aim of model training being to +minimise the error function given a set of inputs and target outputs. + +The error functions will typically measure some concept of distance between the +model outputs and target outputs, averaged over all data points in the data set +or batch. +""" + +import numpy as np + + +class SumOfSquaredDiffsError(object): + """Sum of squared differences (squared Euclidean distance) error.""" + + def __call__(self, outputs, targets): + """Calculates error function given a batch of outputs and targets. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Scalar cost function value. + """ + return 0.5 * np.mean(np.sum((outputs - targets)**2, axis=1)) + + def grad(self, outputs, targets): + """Calculates gradient of error function with respect to outputs. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Gradient of error function with respect to outputs. 
+ """ + return (outputs - targets) / outputs.shape[0] + + def __repr__(self): + return 'MeanSquaredErrorCost' + + +class BinaryCrossEntropyError(object): + """Binary cross entropy error.""" + + def __call__(self, outputs, targets): + """Calculates error function given a batch of outputs and targets. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Scalar error function value. + """ + return -np.mean( + targets * np.log(outputs) + (1. - targets) * np.log(1. - ouputs)) + + def grad(self, outputs, targets): + """Calculates gradient of error function with respect to outputs. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Gradient of error function with respect to outputs. + """ + return ((1. - targets) / (1. - outputs) - + (targets / outputs)) / outputs.shape[0] + + def __repr__(self): + return 'BinaryCrossEntropyError' + + +class BinaryCrossEntropySigmoidError(object): + """Binary cross entropy error with logistic sigmoid applied to outputs.""" + + def __call__(self, outputs, targets): + """Calculates error function given a batch of outputs and targets. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Scalar error function value. + """ + probs = 1. / (1. + np.exp(-outputs)) + return -np.mean( + targets * np.log(probs) + (1. - targets) * np.log(1. - probs)) + + def grad(self, outputs, targets): + """Calculates gradient of error function with respect to outputs. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Gradient of error function with respect to outputs. + """ + probs = 1. / (1. 
+ np.exp(-outputs)) + return (probs - targets) / outputs.shape[0] + + def __repr__(self): + return 'BinaryCrossEntropySigmoidError' + + +class CrossEntropyError(object): + """Multi-class cross entropy error.""" + + def __call__(self, outputs, targets): + """Calculates error function given a batch of outputs and targets. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Scalar error function value. + """ + return -np.mean(np.sum(targets * np.log(outputs), axis=1)) + + def grad(self, outputs, targets): + """Calculates gradient of error function with respect to outputs. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Gradient of error function with respect to outputs. + """ + return -(targets / outputs) / outputs.shape[0] + + def __repr__(self): + return 'CrossEntropyError' + + +class CrossEntropySoftmaxError(object): + """Multi-class cross entropy error with Softmax applied to outputs.""" + + def __call__(self, outputs, targets): + """Calculates error function given a batch of outputs and targets. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Scalar error function value. + """ + normOutputs = outputs - outputs.max(-1)[:, None] + logProb = normOutputs - np.log(np.sum(np.exp(normOutputs), axis=-1)[:, None]) + return -np.mean(np.sum(targets * logProb, axis=1)) + + def grad(self, outputs, targets): + """Calculates gradient of error function with respect to outputs. + + Args: + outputs: Array of model outputs of shape (batch_size, output_dim). + targets: Array of target outputs of shape (batch_size, output_dim). + + Returns: + Gradient of error function with respect to outputs. 
+ """ + probs = np.exp(outputs - outputs.max(-1)[:, None]) + probs /= probs.sum(-1)[:, None] + return (probs - targets) / outputs.shape[0] + + def __repr__(self): + return 'CrossEntropySoftmaxError' diff --git a/mlp/initialisers.py b/mlp/initialisers.py new file mode 100644 index 00000000..8c8e2526 --- /dev/null +++ b/mlp/initialisers.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +"""Parameter initialisers. + +This module defines classes to initialise the parameters in a layer. +""" + +import numpy as np +from mlp import DEFAULT_SEED + + +class ConstantInit(object): + """Constant parameter initialiser.""" + + def __init__(self, value): + """Construct a constant parameter initialiser. + + Args: + value: Value to initialise parameter to. + """ + self.value = value + + def __call__(self, shape): + return np.ones(shape=shape) * self.value + + +class UniformInit(object): + """Random uniform parameter initialiser.""" + + def __init__(self, low, high, rng=None): + """Construct a random uniform parameter initialiser. + + Args: + low: Lower bound of interval to sample from. + high: Upper bound of interval to sample from. + rng (RandomState): Seeded random number generator. + """ + self.low = low + self.high = high + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + + def __call__(self, shape): + return self.rng.uniform(low=self.low, high=self.high, size=shape) + + +class NormalInit(object): + """Random normal parameter initialiser.""" + + def __init__(self, mean, std, rng=None): + """Construct a random uniform parameter initialiser. + + Args: + mean: Mean of distribution to sample from. + std: Standard deviation of distribution to sample from. + rng (RandomState): Seeded random number generator. 
+ """ + self.mean = mean + self.std = std + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + + def __call__(self, shape): + return self.rng.normal(loc=self.mean, scale=self.std, size=shape) + +class GlorotUniformInit(object): + """Glorot and Bengio (2010) random uniform weights initialiser. + + Initialises an two-dimensional parameter array using the 'normalized + initialisation' scheme suggested in [1] which attempts to maintain a + roughly constant variance in the activations and backpropagated gradients + of a multi-layer model consisting of interleaved affine and logistic + sigmoidal transformation layers. + + Weights are sampled from a zero-mean uniform distribution with standard + deviation `sqrt(2 / (input_dim * output_dim))` where `input_dim` and + `output_dim` are the input and output dimensions of the weight matrix + respectively. + + References: + [1]: Understanding the difficulty of training deep feedforward neural + networks, Glorot and Bengio (2010) + """ + + def __init__(self, gain=1., rng=None): + """Construct a normalised initilisation random initialiser object. + + Args: + gain: Multiplicative factor to scale initialised weights by. + Recommended values is 1 for affine layers followed by + logistic sigmoid layers (or another affine layer). + rng (RandomState): Seeded random number generator. + """ + self.gain = gain + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + + def __call__(self, shape): + assert len(shape) == 2, ( + 'Initialiser should only be used for two dimensional arrays.') + std = self.gain * (2. / (shape[0] + shape[1]))**0.5 + half_width = 3.**0.5 * std + return self.rng.uniform(low=-half_width, high=half_width, size=shape) + + +class GlorotNormalInit(object): + """Glorot and Bengio (2010) random normal weights initialiser. 
+ + Initialises an two-dimensional parameter array using the 'normalized + initialisation' scheme suggested in [1] which attempts to maintain a + roughly constant variance in the activations and backpropagated gradients + of a multi-layer model consisting of interleaved affine and logistic + sigmoidal transformation layers. + + Weights are sampled from a zero-mean normal distribution with standard + deviation `sqrt(2 / (input_dim * output_dim))` where `input_dim` and + `output_dim` are the input and output dimensions of the weight matrix + respectively. + + References: + [1]: Understanding the difficulty of training deep feedforward neural + networks, Glorot and Bengio (2010) + """ + + def __init__(self, gain=1., rng=None): + """Construct a normalised initilisation random initialiser object. + + Args: + gain: Multiplicative factor to scale initialised weights by. + Recommended values is 1 for affine layers followed by + logistic sigmoid layers (or another affine layer). + rng (RandomState): Seeded random number generator. + """ + self.gain = gain + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + + def __call__(self, shape): + std = self.gain * (2. / (shape[0] + shape[1]))**0.5 + return self.rng.normal(loc=0., scale=std, size=shape) diff --git a/mlp/layers.py b/mlp/layers.py new file mode 100644 index 00000000..c6641c9a --- /dev/null +++ b/mlp/layers.py @@ -0,0 +1,824 @@ +# -*- coding: utf-8 -*- +"""Layer definitions. + +This module defines classes which encapsulate a single layer. + +These layers map input activations to output activation with the `fprop` +method and map gradients with repsect to outputs to gradients with respect to +their inputs with the `bprop` method. + +Some layers will have learnable parameters and so will additionally define +methods for getting and setting parameter and calculating gradients with +respect to the layer parameters. 
+""" + +import numpy as np +import mlp.initialisers as init +from mlp import DEFAULT_SEED + + +class Layer(object): + """Abstract class defining the interface for a layer.""" + + def fprop(self, inputs): + """Forward propagates activations through the layer transformation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + raise NotImplementedError() + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). + """ + raise NotImplementedError() + + +class LayerWithParameters(Layer): + """Abstract class defining the interface for a layer with parameters.""" + + def grads_wrt_params(self, inputs, grads_wrt_outputs): + """Calculates gradients with respect to layer parameters. + + Args: + inputs: Array of inputs to layer of shape (batch_size, input_dim). + grads_wrt_to_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + List of arrays of gradients with respect to the layer parameters + with parameter gradients appearing in same order in tuple as + returned from `get_params` method. + """ + raise NotImplementedError() + + def params_penalty(self): + """Returns the parameter dependent penalty term for this layer. + + If no parameter-dependent penalty terms are set this returns zero. 
+ """ + raise NotImplementedError() + + @property + def params(self): + """Returns a list of parameters of layer. + + Returns: + List of current parameter values. This list should be in the + corresponding order to the `values` argument to `set_params`. + """ + raise NotImplementedError() + + @params.setter + def params(self, values): + """Sets layer parameters from a list of values. + + Args: + values: List of values to set parameters to. This list should be + in the corresponding order to what is returned by `get_params`. + """ + raise NotImplementedError() + + +class StochasticLayerWithParameters(Layer): + """Specialised layer which uses a stochastic forward propagation.""" + + def __init__(self, rng=None): + """Constructs a new StochasticLayer object. + + Args: + rng (RandomState): Seeded random number generator object. + """ + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + + def fprop(self, inputs, stochastic=True): + """Forward propagates activations through the layer transformation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + stochastic: Flag allowing different deterministic + forward-propagation mode in addition to default stochastic + forward-propagation e.g. for use at test time. If False + a deterministic forward-propagation transformation + corresponding to the expected output of the stochastic + forward-propagation is applied. + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + raise NotImplementedError() + + def grads_wrt_params(self, inputs, grads_wrt_outputs): + """Calculates gradients with respect to layer parameters. + + Args: + inputs: Array of inputs to layer of shape (batch_size, input_dim). + grads_wrt_to_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). 
+ + Returns: + List of arrays of gradients with respect to the layer parameters + with parameter gradients appearing in same order in tuple as + returned from `get_params` method. + """ + raise NotImplementedError() + + def params_penalty(self): + """Returns the parameter dependent penalty term for this layer. + + If no parameter-dependent penalty terms are set this returns zero. + """ + raise NotImplementedError() + + @property + def params(self): + """Returns a list of parameters of layer. + + Returns: + List of current parameter values. This list should be in the + corresponding order to the `values` argument to `set_params`. + """ + raise NotImplementedError() + + @params.setter + def params(self, values): + """Sets layer parameters from a list of values. + + Args: + values: List of values to set parameters to. This list should be + in the corresponding order to what is returned by `get_params`. + """ + raise NotImplementedError() + + +class StochasticLayer(Layer): + """Specialised layer which uses a stochastic forward propagation.""" + + def __init__(self, rng=None): + """Constructs a new StochasticLayer object. + + Args: + rng (RandomState): Seeded random number generator object. + """ + if rng is None: + rng = np.random.RandomState(DEFAULT_SEED) + self.rng = rng + + def fprop(self, inputs, stochastic=True): + """Forward propagates activations through the layer transformation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + stochastic: Flag allowing different deterministic + forward-propagation mode in addition to default stochastic + forward-propagation e.g. for use at test time. If False + a deterministic forward-propagation transformation + corresponding to the expected output of the stochastic + forward-propagation is applied. + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). 
+ """ + raise NotImplementedError() + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. This should correspond to + default stochastic forward-propagation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). + """ + raise NotImplementedError() + + +class AffineLayer(LayerWithParameters): + """Layer implementing an affine tranformation of its inputs. + + This layer is parameterised by a weight matrix and bias vector. + """ + + def __init__(self, input_dim, output_dim, + weights_initialiser=init.UniformInit(-0.1, 0.1), + biases_initialiser=init.ConstantInit(0.), + weights_penalty=None, biases_penalty=None): + """Initialises a parameterised affine layer. + + Args: + input_dim (int): Dimension of inputs to the layer. + output_dim (int): Dimension of the layer outputs. + weights_initialiser: Initialiser for the weight parameters. + biases_initialiser: Initialiser for the bias parameters. + weights_penalty: Weights-dependent penalty term (regulariser) or + None if no regularisation is to be applied to the weights. + biases_penalty: Biases-dependent penalty term (regulariser) or + None if no regularisation is to be applied to the biases. 
+ """ + self.input_dim = input_dim + self.output_dim = output_dim + self.weights = weights_initialiser((self.output_dim, self.input_dim)) + self.biases = biases_initialiser(self.output_dim) + self.weights_penalty = weights_penalty + self.biases_penalty = biases_penalty + + def fprop(self, inputs): + """Forward propagates activations through the layer transformation. + + For inputs `x`, outputs `y`, weights `W` and biases `b` the layer + corresponds to `y = W.dot(x) + b`. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + return self.weights.dot(inputs.T).T + self.biases + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). + """ + return grads_wrt_outputs.dot(self.weights) + + def grads_wrt_params(self, inputs, grads_wrt_outputs): + """Calculates gradients with respect to layer parameters. + + Args: + inputs: array of inputs to layer of shape (batch_size, input_dim) + grads_wrt_to_outputs: array of gradients with respect to the layer + outputs of shape (batch_size, output_dim) + + Returns: + list of arrays of gradients with respect to the layer parameters + `[grads_wrt_weights, grads_wrt_biases]`. 
+ """ + + grads_wrt_weights = np.dot(grads_wrt_outputs.T, inputs) + grads_wrt_biases = np.sum(grads_wrt_outputs, axis=0) + + if self.weights_penalty is not None: + grads_wrt_weights += self.weights_penalty.grad(parameter=self.weights) + + if self.biases_penalty is not None: + grads_wrt_biases += self.biases_penalty.grad(parameter=self.biases) + + return [grads_wrt_weights, grads_wrt_biases] + + def params_penalty(self): + """Returns the parameter dependent penalty term for this layer. + + If no parameter-dependent penalty terms are set this returns zero. + """ + params_penalty = 0 + if self.weights_penalty is not None: + params_penalty += self.weights_penalty(self.weights) + if self.biases_penalty is not None: + params_penalty += self.biases_penalty(self.biases) + return params_penalty + + @property + def params(self): + """A list of layer parameter values: `[weights, biases]`.""" + return [self.weights, self.biases] + + @params.setter + def params(self, values): + self.weights = values[0] + self.biases = values[1] + + def __repr__(self): + return 'AffineLayer(input_dim={0}, output_dim={1})'.format( + self.input_dim, self.output_dim) + + +class SigmoidLayer(Layer): + """Layer implementing an element-wise logistic sigmoid transformation.""" + + def fprop(self, inputs): + """Forward propagates activations through the layer transformation. + + For inputs `x` and outputs `y` this corresponds to + `y = 1 / (1 + exp(-x))`. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + return 1. / (1. + np.exp(-inputs)) + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). 
            outputs: Array of layer outputs calculated in forward pass of
                shape (batch_size, output_dim).
            grads_wrt_outputs: Array of gradients with respect to the layer
                outputs of shape (batch_size, output_dim).

        Returns:
            Array of gradients with respect to the layer inputs of shape
            (batch_size, input_dim).
        """
        # For y = sigmoid(x), dy/dx = y * (1 - y); chain rule with the
        # incoming gradients gives the gradients with respect to the inputs.
        return grads_wrt_outputs * outputs * (1. - outputs)

    def __repr__(self):
        return 'SigmoidLayer'


class ConvolutionalLayer(LayerWithParameters):
    """Layer implementing a 2D convolution-based transformation of its inputs.

    The layer is parameterised by a set of 2D convolutional kernels, a four
    dimensional array of shape

        (num_output_channels, num_input_channels, kernel_height, kernel_width)

    and a bias vector, a one dimensional array of shape

        (num_output_channels,)

    i.e. one shared bias per output channel.

    Assuming no-padding is applied to the inputs so that outputs are only
    calculated for positions where the kernel filters fully overlap with the
    inputs, and that unit strides are used the outputs will have spatial extent

        output_height = input_height - kernel_height + 1
        output_width = input_width - kernel_width + 1
    """

    # NOTE(review): the default initialiser objects below are created once at
    # class-definition time and shared across all instances - fine while the
    # Init objects are stateless; confirm against their definitions.
    def __init__(self, num_input_channels, num_output_channels,
                 input_height, input_width,
                 kernel_height, kernel_width,
                 kernels_init=init.UniformInit(-0.01, 0.01),
                 biases_init=init.ConstantInit(0.),
                 kernels_penalty=None, biases_penalty=None):
        """Initialises a parameterised convolutional layer.

        Args:
            num_input_channels (int): Number of channels in inputs to
                layer (this may be number of colour channels in the input
                images if used as the first layer in a model, or the
                number of output channels, a.k.a. feature maps, from a
                previous convolutional layer).
            num_output_channels (int): Number of channels in outputs
                from the layer, a.k.a. number of feature maps.
            input_height (int): Size of first input dimension of each 2D
                channel of inputs.
            input_width (int): Size of second input dimension of each 2D
                channel of inputs.
            kernel_height (int): Size of first dimension of each 2D channel of
                kernels.
            kernel_width (int): Size of second dimension of each 2D channel of
                kernels.
            kernels_init: Initialiser for the kernel parameters.
            biases_init: Initialiser for the bias parameters.
            kernels_penalty: Kernel-dependent penalty term (regulariser) or
                None if no regularisation is to be applied to the kernels.
            biases_penalty: Biases-dependent penalty term (regulariser) or
                None if no regularisation is to be applied to the biases.
        """
        self.num_input_channels = num_input_channels
        self.num_output_channels = num_output_channels
        self.input_height = input_height
        self.input_width = input_width
        self.kernel_height = kernel_height
        self.kernel_width = kernel_width
        self.kernels_init = kernels_init
        self.biases_init = biases_init
        self.kernels_shape = (
            num_output_channels, num_input_channels, kernel_height, kernel_width
        )
        # Leading None marks the (variable) batch dimension.
        self.inputs_shape = (
            None, num_input_channels, input_height, input_width
        )
        self.kernels = self.kernels_init(self.kernels_shape)
        self.biases = self.biases_init(num_output_channels)
        self.kernels_penalty = kernels_penalty
        self.biases_penalty = biases_penalty

        # Populated by fprop implementations to reuse intermediates in the
        # backward pass (see the inputs_col comment in grads_wrt_params).
        self.cache = None

    def fprop(self, inputs):
        """Forward propagates activations through the layer transformation.

        For inputs `x`, outputs `y`, kernels `K` and biases `b` the layer
        corresponds to `y = conv2d(x, K) + b`.

        Args:
            inputs: Array of layer inputs of shape (batch_size, num_input_channels, image_height, image_width).

        Returns:
            outputs: Array of layer outputs of shape (batch_size, num_output_channels, output_height, output_width).
        """
        # Intentionally left unimplemented in this skeleton.
        raise NotImplementedError

    def bprop(self, inputs, outputs, grads_wrt_outputs):
        """Back propagates gradients through a layer.

        Given gradients with respect to the outputs of the layer calculates the
        gradients with respect to the layer inputs.

        Args:
            inputs: Array of layer inputs of shape
                (batch_size, num_input_channels, input_height, input_width).
            outputs: Array of layer outputs calculated in forward pass of
                shape
                (batch_size, num_output_channels, output_height, output_width).
            grads_wrt_outputs: Array of gradients with respect to the layer
                outputs of shape
                (batch_size, num_output_channels, output_height, output_width).

        Returns:
            Array of gradients with respect to the layer inputs of shape
            (batch_size, num_input_channels, input_height, input_width).
        """
        # Pad the grads_wrt_outputs
        # Intentionally left unimplemented in this skeleton.
        raise NotImplementedError

    def grads_wrt_params(self, inputs, grads_wrt_outputs):
        """Calculates gradients with respect to layer parameters.

        Args:
            inputs: array of inputs to layer of shape (batch_size, input_dim)
            grads_wrt_to_outputs: array of gradients with respect to the layer
                outputs of shape
                (batch_size, num_output_channels, output_height, output_width).

        Returns:
            list of arrays of gradients with respect to the layer parameters
            `[grads_wrt_kernels, grads_wrt_biases]`.
        """
        # Get inputs_col from previous fprop
        # Intentionally left unimplemented in this skeleton.
        raise NotImplementedError

    def params_penalty(self):
        """Returns the parameter dependent penalty term for this layer.

        If no parameter-dependent penalty terms are set this returns zero.
        """
        params_penalty = 0
        if self.kernels_penalty is not None:
            params_penalty += self.kernels_penalty(self.kernels)
        if self.biases_penalty is not None:
            params_penalty += self.biases_penalty(self.biases)
        return params_penalty

    @property
    def params(self):
        """A list of layer parameter values: `[kernels, biases]`."""
        return [self.kernels, self.biases]

    @params.setter
    def params(self, values):
        self.kernels = values[0]
        self.biases = values[1]

    def __repr__(self):
        return (
            'ConvolutionalLayer(\n'
            ' num_input_channels={0}, num_output_channels={1},\n'
            ' input_height={2}, input_width={3},\n'
            ' kernel_height={4}, kernel_width={5}\n'
            ')'
            .format(self.num_input_channels, self.num_output_channels,
                    self.input_height, self.input_width, self.kernel_height,
                    self.kernel_width)
        )


class ReluLayer(Layer):
    """Layer implementing an element-wise rectified linear transformation."""

    def fprop(self, inputs):
        """Forward propagates activations through the layer transformation.

        For inputs `x` and outputs `y` this corresponds to `y = max(0, x)`.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).

        Returns:
            outputs: Array of layer outputs of shape (batch_size, output_dim).
        """
        return np.maximum(inputs, 0.)

    def bprop(self, inputs, outputs, grads_wrt_outputs):
        """Back propagates gradients through a layer.

        Given gradients with respect to the outputs of the layer calculates the
        gradients with respect to the layer inputs.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).
            outputs: Array of layer outputs calculated in forward pass of
                shape (batch_size, output_dim).
            grads_wrt_outputs: Array of gradients with respect to the layer
                outputs of shape (batch_size, output_dim).

        Returns:
            Array of gradients with respect to the layer inputs of shape
            (batch_size, input_dim).
        """
        # Subgradient of ReLU: pass gradients through only where the unit was
        # active (output > 0) in the forward pass.
        return (outputs > 0) * grads_wrt_outputs

    def __repr__(self):
        return 'ReluLayer'


class TanhLayer(Layer):
    """Layer implementing an element-wise hyperbolic tangent transformation."""

    def fprop(self, inputs):
        """Forward propagates activations through the layer transformation.

        For inputs `x` and outputs `y` this corresponds to `y = tanh(x)`.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).

        Returns:
            outputs: Array of layer outputs of shape (batch_size, output_dim).
        """
        return np.tanh(inputs)

    def bprop(self, inputs, outputs, grads_wrt_outputs):
        """Back propagates gradients through a layer.

        Given gradients with respect to the outputs of the layer calculates the
        gradients with respect to the layer inputs.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).
            outputs: Array of layer outputs calculated in forward pass of
                shape (batch_size, output_dim).
            grads_wrt_outputs: Array of gradients with respect to the layer
                outputs of shape (batch_size, output_dim).

        Returns:
            Array of gradients with respect to the layer inputs of shape
            (batch_size, input_dim).
        """
        # For y = tanh(x), dy/dx = 1 - y**2.
        return (1. - outputs ** 2) * grads_wrt_outputs

    def __repr__(self):
        return 'TanhLayer'


class SoftmaxLayer(Layer):
    """Layer implementing a softmax transformation."""

    def fprop(self, inputs):
        """Forward propagates activations through the layer transformation.

        For inputs `x` and outputs `y` this corresponds to

            `y = exp(x) / sum(exp(x))`.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).

        Returns:
            outputs: Array of layer outputs of shape (batch_size, output_dim).
+ """ + # subtract max inside exponential to improve numerical stability - + # when we divide through by sum this term cancels + exp_inputs = np.exp(inputs - inputs.max(-1)[:, None]) + return exp_inputs / exp_inputs.sum(-1)[:, None] + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). + """ + return (outputs * (grads_wrt_outputs - + (grads_wrt_outputs * outputs).sum(-1)[:, None])) + + def __repr__(self): + return 'SoftmaxLayer' + + +class RadialBasisFunctionLayer(Layer): + """Layer implementing projection to a grid of radial basis functions.""" + + def __init__(self, grid_dim, intervals=[[0., 1.]]): + """Creates a radial basis function layer object. + + Args: + grid_dim: Integer specifying how many basis function to use in + grid across input space per dimension (so total number of + basis functions will be grid_dim**input_dim) + intervals: List of intervals (two element lists or tuples) + specifying extents of axis-aligned region in input-space to + tile basis functions in grid across. For example for a 2D input + space spanning [0, 1] x [0, 1] use intervals=[[0, 1], [0, 1]]. + """ + num_basis = grid_dim ** len(intervals) + self.centres = np.array(np.meshgrid(*[ + np.linspace(low, high, grid_dim) for (low, high) in intervals]) + ).reshape((len(intervals), -1)) + self.scales = np.array([ + [(high - low) * 1. 
/ grid_dim] for (low, high) in intervals]) + + def fprop(self, inputs): + """Forward propagates activations through the layer transformation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + return np.exp(-(inputs[..., None] - self.centres[None, ...]) ** 2 / + self.scales ** 2).reshape((inputs.shape[0], -1)) + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). + """ + num_basis = self.centres.shape[1] + return -2 * ( + ((inputs[..., None] - self.centres[None, ...]) / self.scales ** 2) * + grads_wrt_outputs.reshape((inputs.shape[0], -1, num_basis)) + ).sum(-1) + + def __repr__(self): + return 'RadialBasisFunctionLayer(grid_dim={0})'.format(self.grid_dim) + + +class DropoutLayer(StochasticLayer): + """Layer which stochastically drops input dimensions in its output.""" + + def __init__(self, rng=None, incl_prob=0.5, share_across_batch=True): + """Construct a new dropout layer. + + Args: + rng (RandomState): Seeded random number generator. + incl_prob: Scalar value in (0, 1] specifying the probability of + each input dimension being included in the output. + share_across_batch: Whether to use same dropout mask across + all inputs in a batch or use per input masks. + """ + super(DropoutLayer, self).__init__(rng) + assert incl_prob > 0. and incl_prob <= 1. 
+ self.incl_prob = incl_prob + self.share_across_batch = share_across_batch + self.rng = rng + + def fprop(self, inputs, stochastic=True): + """Forward propagates activations through the layer transformation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + stochastic: Flag allowing different deterministic + forward-propagation mode in addition to default stochastic + forward-propagation e.g. for use at test time. If False + a deterministic forward-propagation transformation + corresponding to the expected output of the stochastic + forward-propagation is applied. + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + if stochastic: + mask_shape = (1,) + inputs.shape[1:] if self.share_across_batch else inputs.shape + self._mask = (self.rng.uniform(size=mask_shape) < self.incl_prob) + return inputs * self._mask + else: + return inputs * self.incl_prob + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. This should correspond to + default stochastic forward-propagation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). + """ + return grads_wrt_outputs * self._mask + + def __repr__(self): + return 'DropoutLayer(incl_prob={0:.1f})'.format(self.incl_prob) + + +class ReshapeLayer(Layer): + """Layer which reshapes dimensions of inputs.""" + + def __init__(self, output_shape=None): + """Create a new reshape layer object. 
+ + Args: + output_shape: Tuple specifying shape each input in batch should + be reshaped to in outputs. This **excludes** the batch size + so the shape of the final output array will be + (batch_size, ) + output_shape + Similarly to numpy.reshape, one shape dimension can be -1. In + this case, the value is inferred from the size of the input + array and remaining dimensions. The shape specified must be + compatible with the input array shape - i.e. the total number + of values in the array cannot be changed. If set to `None` the + output shape will be set to + (batch_size, -1) + which will flatten all the inputs to vectors. + """ + self.output_shape = (-1,) if output_shape is None else output_shape + + def fprop(self, inputs): + """Forward propagates activations through the layer transformation. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + + Returns: + outputs: Array of layer outputs of shape (batch_size, output_dim). + """ + return inputs.reshape((inputs.shape[0],) + self.output_shape) + + def bprop(self, inputs, outputs, grads_wrt_outputs): + """Back propagates gradients through a layer. + + Given gradients with respect to the outputs of the layer calculates the + gradients with respect to the layer inputs. + + Args: + inputs: Array of layer inputs of shape (batch_size, input_dim). + outputs: Array of layer outputs calculated in forward pass of + shape (batch_size, output_dim). + grads_wrt_outputs: Array of gradients with respect to the layer + outputs of shape (batch_size, output_dim). + + Returns: + Array of gradients with respect to the layer inputs of shape + (batch_size, input_dim). 
        """
        # Reshaping is linear, so bprop just restores the input shape.
        return grads_wrt_outputs.reshape(inputs.shape)

    def __repr__(self):
        return 'ReshapeLayer(output_shape={0})'.format(self.output_shape)
\ No newline at end of file
diff --git a/mlp/learning_rules.py b/mlp/learning_rules.py
new file mode 100644
index 00000000..52f34ccd
--- /dev/null
+++ b/mlp/learning_rules.py
@@ -0,0 +1,388 @@
# -*- coding: utf-8 -*-
"""Learning rules.

This module contains classes implementing gradient based learning rules.
"""

import numpy as np


class GradientDescentLearningRule(object):
    """Simple (stochastic) gradient descent learning rule.

    For a scalar error function `E(p[0], p_[1] ... )` of some set of
    potentially multidimensional parameters this attempts to find a local
    minimum of the loss function by applying updates to each parameter of the
    form

        p[i] := p[i] - learning_rate * dE/dp[i]

    With `learning_rate` a positive scaling parameter.

    The error function used in successive applications of these updates may be
    a stochastic estimator of the true error function (e.g. when the error with
    respect to only a subset of data-points is calculated) in which case this
    will correspond to a stochastic gradient descent learning rule.
    """

    def __init__(self, learning_rate=1e-3):
        """Creates a new learning rule object.

        Args:
            learning_rate: A positive scalar to scale gradient updates to the
                parameters by. This needs to be carefully set - if too large
                the learning dynamic will be unstable and may diverge, while
                if set too small learning will proceed very slowly.

        """
        assert learning_rate > 0., 'learning_rate should be positive.'
        self.learning_rate = learning_rate

    def initialise(self, params):
        """Initialises the state of the learning rule for a set or parameters.

        This must be called before `update_params` is first called.

        Args:
            params: A list of the parameters to be optimised. Note these will
                be updated *in-place* to avoid reallocating arrays on each
                update.
        """
        self.params = params

    def reset(self):
        """Resets any additional state variables to their initial values.

        For this learning rule there are no additional state variables so we
        do nothing here.
        """
        pass

    def update_params(self, grads_wrt_params):
        """Applies a single gradient descent update to all parameters.

        All parameter updates are performed using in-place operations and so
        nothing is returned.

        Args:
            grads_wrt_params: A list of gradients of the scalar loss function
                with respect to each of the parameters passed to `initialise`
                previously, with this list expected to be in the same order.
        """
        # In-place so callers holding references to the arrays see updates.
        for param, grad in zip(self.params, grads_wrt_params):
            param -= self.learning_rate * grad


class MomentumLearningRule(GradientDescentLearningRule):
    """Gradient descent with momentum learning rule.

    This extends the basic gradient learning rule by introducing extra
    momentum state variables for each parameter. These can help the learning
    dynamic help overcome shallow local minima and speed convergence when
    making multiple successive steps in a similar direction in parameter space.

    For parameter p[i] and corresponding momentum m[i] the updates for a
    scalar loss function `L` are of the form

        m[i] := mom_coeff * m[i] - learning_rate * dL/dp[i]
        p[i] := p[i] + m[i]

    with `learning_rate` a positive scaling parameter for the gradient updates
    and `mom_coeff` a value in [0, 1] that determines how much 'friction' there
    is the system and so how quickly previous momentum contributions decay.
    """

    def __init__(self, learning_rate=1e-3, mom_coeff=0.9):
        """Creates a new learning rule object.

        Args:
            learning_rate: A positive scalar to scale gradient updates to the
                parameters by. This needs to be carefully set - if too large
                the learning dynamic will be unstable and may diverge, while
                if set too small learning will proceed very slowly.
            mom_coeff: A scalar in the range [0, 1] inclusive.
This determines + the contribution of the previous momentum value to the value + after each update. If equal to 0 the momentum is set to exactly + the negative scaled gradient each update and so this rule + collapses to standard gradient descent. If equal to 1 the + momentum will just be decremented by the scaled gradient at + each update. This is equivalent to simulating the dynamic in + a frictionless system. Due to energy conservation the loss + of 'potential energy' as the dynamics moves down the loss + function surface will lead to an increasingly large 'kinetic + energy' and so speed, meaning the updates will become + increasingly large, potentially unstably so. Typically a value + less than but close to 1 will avoid these issues and cause the + dynamic to converge to a local minima where the gradients are + by definition zero. + """ + super(MomentumLearningRule, self).__init__(learning_rate) + assert mom_coeff >= 0. and mom_coeff <= 1., ( + 'mom_coeff should be in the range [0, 1].' + ) + self.mom_coeff = mom_coeff + + def initialise(self, params): + """Initialises the state of the learning rule for a set or parameters. + + This must be called before `update_params` is first called. + + Args: + params: A list of the parameters to be optimised. Note these will + be updated *in-place* to avoid reallocating arrays on each + update. + """ + super(MomentumLearningRule, self).initialise(params) + self.moms = [] + for param in self.params: + self.moms.append(np.zeros_like(param)) + + def reset(self): + """Resets any additional state variables to their intial values. + + For this learning rule this corresponds to zeroing all the momenta. + """ + for mom in zip(self.moms): + mom *= 0. + + def update_params(self, grads_wrt_params): + """Applies a single update to all parameters. + + All parameter updates are performed using in-place operations and so + nothing is returned. 
+ + Args: + grads_wrt_params: A list of gradients of the scalar loss function + with respect to each of the parameters passed to `initialise` + previously, with this list expected to be in the same order. + """ + for param, mom, grad in zip(self.params, self.moms, grads_wrt_params): + mom *= self.mom_coeff + mom -= self.learning_rate * grad + param += mom + + +class AdamLearningRule(GradientDescentLearningRule): + """Adaptive moments (Adam) learning rule. + First-order gradient-descent based learning rule which uses adaptive + estimates of first and second moments of the parameter gradients to + calculate the parameter updates. + References: + [1]: Adam: a method for stochastic optimisation + Kingma and Ba, 2015 + """ + + def __init__(self, learning_rate=1e-3, beta_1=0.9, beta_2=0.999, + epsilon=1e-8): + """Creates a new learning rule object. + Args: + learning_rate: A postive scalar to scale gradient updates to the + parameters by. This needs to be carefully set - if too large + the learning dynamic will be unstable and may diverge, while + if set too small learning will proceed very slowly. + beta_1: Exponential decay rate for gradient first moment estimates. + This should be a scalar value in [0, 1]. The running gradient + first moment estimate is calculated using + `m_1 = beta_1 * m_1_prev + (1 - beta_1) * g` + where `m_1_prev` is the previous estimate and `g` the current + parameter gradients. + beta_2: Exponential decay rate for gradient second moment + estimates. This should be a scalar value in [0, 1]. The run + gradient second moment estimate is calculated using + `m_2 = beta_2 * m_2_prev + (1 - beta_2) * g**2` + where `m_2_prev` is the previous estimate and `g` the current + parameter gradients. + epsilon: 'Softening' parameter to stop updates diverging when + second moment estimates are close to zero. Should be set to + a small positive value. + """ + super(AdamLearningRule, self).__init__(learning_rate) + assert beta_1 >= 0. 
and beta_1 <= 1., 'beta_1 should be in [0, 1].' + assert beta_2 >= 0. and beta_2 <= 1., 'beta_2 should be in [0, 2].' + assert epsilon > 0., 'epsilon should be > 0.' + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + + def initialise(self, params): + """Initialises the state of the learning rule for a set or parameters. + This must be called before `update_params` is first called. + Args: + params: A list of the parameters to be optimised. Note these will + be updated *in-place* to avoid reallocating arrays on each + update. + """ + super(AdamLearningRule, self).initialise(params) + self.moms_1 = [] + for param in self.params: + self.moms_1.append(np.zeros_like(param)) + self.moms_2 = [] + for param in self.params: + self.moms_2.append(np.zeros_like(param)) + self.step_count = 0 + + def reset(self): + """Resets any additional state variables to their initial values. + For this learning rule this corresponds to zeroing the estimates of + the first and second moments of the gradients. + """ + for mom_1, mom_2 in zip(self.moms_1, self.moms_2): + mom_1 *= 0. + mom_2 *= 0. + self.step_count = 0 + + def update_params(self, grads_wrt_params): + """Applies a single update to all parameters. + All parameter updates are performed using in-place operations and so + nothing is returned. + Args: + grads_wrt_params: A list of gradients of the scalar loss function + with respect to each of the parameters passed to `initialise` + previously, with this list expected to be in the same order. + """ + for param, mom_1, mom_2, grad in zip( + self.params, self.moms_1, self.moms_2, grads_wrt_params): + mom_1 *= self.beta_1 + mom_1 += (1. - self.beta_1) * grad + mom_2 *= self.beta_2 + mom_2 += (1. - self.beta_2) * grad ** 2 + alpha_t = ( + self.learning_rate * + (1. - self.beta_2 ** (self.step_count + 1)) ** 0.5 / + (1. 
- self.beta_1 ** (self.step_count + 1)) + ) + param -= alpha_t * mom_1 / (mom_2 ** 0.5 + self.epsilon) + self.step_count += 1 + + +class AdaGradLearningRule(GradientDescentLearningRule): + """Adaptive gradients (AdaGrad) learning rule. + First-order gradient-descent based learning rule which normalises gradient + updates by a running sum of the past squared gradients. + References: + [1]: Adaptive Subgradient Methods for Online Learning and Stochastic + Optimization. Duchi, Haxan and Singer, 2011 + """ + + def __init__(self, learning_rate=1e-2, epsilon=1e-8): + """Creates a new learning rule object. + Args: + learning_rate: A postive scalar to scale gradient updates to the + parameters by. This needs to be carefully set - if too large + the learning dynamic will be unstable and may diverge, while + if set too small learning will proceed very slowly. + epsilon: 'Softening' parameter to stop updates diverging when + sums of squared gradients are close to zero. Should be set to + a small positive value. + """ + super(AdaGradLearningRule, self).__init__(learning_rate) + assert epsilon > 0., 'epsilon should be > 0.' + self.epsilon = epsilon + + def initialise(self, params): + """Initialises the state of the learning rule for a set or parameters. + This must be called before `update_params` is first called. + Args: + params: A list of the parameters to be optimised. Note these will + be updated *in-place* to avoid reallocating arrays on each + update. + """ + super(AdaGradLearningRule, self).initialise(params) + self.sum_sq_grads = [] + for param in self.params: + self.sum_sq_grads.append(np.zeros_like(param)) + + def reset(self): + """Resets any additional state variables to their initial values. + For this learning rule this corresponds to zeroing all the sum of + squared gradient states. + """ + for sum_sq_grad in self.sum_sq_grads: + sum_sq_grad *= 0. + + def update_params(self, grads_wrt_params): + """Applies a single update to all parameters. 

        All parameter updates are performed using in-place operations and so
        nothing is returned.

        Args:
            grads_wrt_params: A list of gradients of the scalar loss function
                with respect to each of the parameters passed to `initialise`
                previously, with this list expected to be in the same order.
        """
        for param, sum_sq_grad, grad in zip(
                self.params, self.sum_sq_grads, grads_wrt_params):
            # Accumulate squared gradients in place; the running sum only
            # grows, so effective step sizes shrink monotonically.
            sum_sq_grad += grad ** 2
            param -= (self.learning_rate * grad /
                      (sum_sq_grad + self.epsilon) ** 0.5)


class RMSPropLearningRule(GradientDescentLearningRule):
    """Root mean squared gradient normalised learning rule (RMSProp).

    First-order gradient-descent based learning rule which normalises gradient
    updates by a exponentially smoothed estimate of the gradient second
    moments.

    References:
        [1]: Neural Networks for Machine Learning: Lecture 6a slides
             University of Toronto,Computer Science Course CSC321
             http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
    """

    def __init__(self, learning_rate=1e-3, beta=0.9, epsilon=1e-8):
        """Creates a new learning rule object.

        Args:
            learning_rate: A positive scalar to scale gradient updates to the
                parameters by. This needs to be carefully set - if too large
                the learning dynamic will be unstable and may diverge, while
                if set too small learning will proceed very slowly.
            beta: Exponential decay rate for gradient second moment
                estimates. This should be a scalar value in [0, 1]. The running
                gradient second moment estimate is calculated using
                    `m_2 = beta * m_2_prev + (1 - beta) * g**2`
                where `m_2_prev` is the previous estimate and `g` the current
                parameter gradients.
            epsilon: 'Softening' parameter to stop updates diverging when
                gradient second moment estimates are close to zero. Should be
                set to a small positive value.
        """
        super(RMSPropLearningRule, self).__init__(learning_rate)
        assert beta >= 0. and beta <= 1., 'beta should be in [0, 1].'
        assert epsilon > 0., 'epsilon should be > 0.'
        self.beta = beta
        self.epsilon = epsilon

    def initialise(self, params):
        """Initialises the state of the learning rule for a set or parameters.

        This must be called before `update_params` is first called.

        Args:
            params: A list of the parameters to be optimised. Note these will
                be updated *in-place* to avoid reallocating arrays on each
                update.
        """
        super(RMSPropLearningRule, self).initialise(params)
        # Zero-initialised second moment buffers, one per parameter array.
        self.moms_2 = []
        for param in self.params:
            self.moms_2.append(np.zeros_like(param))

    def reset(self):
        """Resets any additional state variables to their initial values.

        For this learning rule this corresponds to zeroing all gradient
        second moment estimates.
        """
        for mom_2 in self.moms_2:
            mom_2 *= 0.

    def update_params(self, grads_wrt_params):
        """Applies a single update to all parameters.

        All parameter updates are performed using in-place operations and so
        nothing is returned.

        Args:
            grads_wrt_params: A list of gradients of the scalar loss function
                with respect to each of the parameters passed to `initialise`
                previously, with this list expected to be in the same order.
        """
        for param, mom_2, grad in zip(
                self.params, self.moms_2, grads_wrt_params):
            # Exponentially smoothed second moment estimate, updated in place.
            mom_2 *= self.beta
            mom_2 += (1. - self.beta) * grad ** 2
            param -= (self.learning_rate * grad /
                      (mom_2 + self.epsilon) ** 0.5)
diff --git a/mlp/models.py b/mlp/models.py
new file mode 100644
index 00000000..b292cf4c
--- /dev/null
+++ b/mlp/models.py
@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
"""Model definitions.

This module implements objects encapsulating learnable models of input-output
relationships. The model objects implement methods for forward propagating
the inputs through the transformation(s) defined by the model to produce
outputs (and intermediate states) and for calculating gradients of scalar
functions of the outputs with respect to the model parameters.
+""" + +from mlp.layers import LayerWithParameters, StochasticLayer, StochasticLayerWithParameters + + +class SingleLayerModel(object): + """A model consisting of a single transformation layer.""" + + def __init__(self, layer): + """Create a new single layer model instance. + + Args: + layer: The layer object defining the model architecture. + """ + self.layer = layer + + @property + def params(self): + """A list of all of the parameters of the model.""" + return self.layer.params + + def fprop(self, inputs, evaluation=False): + """Calculate the model outputs corresponding to a batch of inputs. + + Args: + inputs: Batch of inputs to the model. + + Returns: + List which is a concatenation of the model inputs and model + outputs, this being done for consistency of the interface with + multi-layer models for which `fprop` returns a list of + activations through all immediate layers of the model and including + the inputs and outputs. + """ + activations = [inputs, self.layer.fprop(inputs)] + return activations + + def grads_wrt_params(self, activations, grads_wrt_outputs): + """Calculates gradients with respect to the model parameters. + + Args: + activations: List of all activations from forward pass through + model using `fprop`. + grads_wrt_outputs: Gradient with respect to the model outputs of + the scalar function parameter gradients are being calculated + for. + + Returns: + List of gradients of the scalar function with respect to all model + parameters. + """ + return self.layer.grads_wrt_params(activations[0], grads_wrt_outputs) + + def __repr__(self): + return 'SingleLayerModel(' + str(self.layer) + ')' + + +class MultipleLayerModel(object): + """A model consisting of multiple layers applied sequentially.""" + + def __init__(self, layers): + """Create a new multiple layer model instance. + + Args: + layers: List of the the layer objecst defining the model in the + order they should be applied from inputs to outputs. 
+ """ + self.layers = layers + + @property + def params(self): + """A list of all of the parameters of the model.""" + params = [] + for layer in self.layers: + if isinstance(layer, LayerWithParameters) or isinstance(layer, StochasticLayerWithParameters): + params += layer.params + return params + + def fprop(self, inputs, evaluation=False): + """Forward propagates a batch of inputs through the model. + + Args: + inputs: Batch of inputs to the model. + + Returns: + List of the activations at the output of all layers of the model + plus the inputs (to the first layer) as the first element. The + last element of the list corresponds to the model outputs. + """ + activations = [inputs] + for i, layer in enumerate(self.layers): + if evaluation: + if issubclass(type(self.layers[i]), StochasticLayer) or issubclass(type(self.layers[i]), + StochasticLayerWithParameters): + current_activations = self.layers[i].fprop(activations[i], stochastic=False) + else: + current_activations = self.layers[i].fprop(activations[i]) + else: + if issubclass(type(self.layers[i]), StochasticLayer) or issubclass(type(self.layers[i]), + StochasticLayerWithParameters): + current_activations = self.layers[i].fprop(activations[i], stochastic=True) + else: + current_activations = self.layers[i].fprop(activations[i]) + activations.append(current_activations) + return activations + + def grads_wrt_params(self, activations, grads_wrt_outputs): + """Calculates gradients with respect to the model parameters. + + Args: + activations: List of all activations from forward pass through + model using `fprop`. + grads_wrt_outputs: Gradient with respect to the model outputs of + the scalar function parameter gradients are being calculated + for. + + Returns: + List of gradients of the scalar function with respect to all model + parameters. 
+ """ + grads_wrt_params = [] + for i, layer in enumerate(self.layers[::-1]): + inputs = activations[-i - 2] + outputs = activations[-i - 1] + grads_wrt_inputs = layer.bprop(inputs, outputs, grads_wrt_outputs) + if isinstance(layer, LayerWithParameters) or isinstance(layer, StochasticLayerWithParameters): + grads_wrt_params += layer.grads_wrt_params( + inputs, grads_wrt_outputs)[::-1] + grads_wrt_outputs = grads_wrt_inputs + return grads_wrt_params[::-1] + + def __repr__(self): + return ( + 'MultiLayerModel(\n ' + + '\n '.join([str(layer) for layer in self.layers]) + + '\n)' + ) diff --git a/mlp/optimisers.py b/mlp/optimisers.py new file mode 100644 index 00000000..8ab313af --- /dev/null +++ b/mlp/optimisers.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +"""Model optimisers. + +This module contains objects implementing (batched) stochastic gradient descent +based optimisation of models. +""" + +import time +import logging +from collections import OrderedDict +import numpy as np +import tqdm + +logger = logging.getLogger(__name__) + + +class Optimiser(object): + """Basic model optimiser.""" + + def __init__(self, model, error, learning_rule, train_dataset, + valid_dataset=None, data_monitors=None, notebook=False): + """Create a new optimiser instance. + + Args: + model: The model to optimise. + error: The scalar error function to minimise. + learning_rule: Gradient based learning rule to use to minimise + error. + train_dataset: Data provider for training set data batches. + valid_dataset: Data provider for validation set data batches. + data_monitors: Dictionary of functions evaluated on targets and + model outputs (averaged across both full training and + validation data sets) to monitor during training in addition + to the error. Keys should correspond to a string label for + the statistic being evaluated. 
+ """ + self.model = model + self.error = error + self.learning_rule = learning_rule + self.learning_rule.initialise(self.model.params) + self.train_dataset = train_dataset + self.valid_dataset = valid_dataset + self.data_monitors = OrderedDict([('error', error)]) + if data_monitors is not None: + self.data_monitors.update(data_monitors) + self.notebook = notebook + if notebook: + self.tqdm_progress = tqdm.tqdm_notebook + else: + self.tqdm_progress = tqdm.tqdm + + def do_training_epoch(self): + """Do a single training epoch. + + This iterates through all batches in training dataset, for each + calculating the gradient of the estimated error given the batch with + respect to all the model parameters and then updates the model + parameters according to the learning rule. + """ + with self.tqdm_progress(total=self.train_dataset.num_batches) as train_progress_bar: + train_progress_bar.set_description("Epoch Progress") + for inputs_batch, targets_batch in self.train_dataset: + activations = self.model.fprop(inputs_batch) + grads_wrt_outputs = self.error.grad(activations[-1], targets_batch) + grads_wrt_params = self.model.grads_wrt_params( + activations, grads_wrt_outputs) + self.learning_rule.update_params(grads_wrt_params) + train_progress_bar.update(1) + + def eval_monitors(self, dataset, label): + """Evaluates the monitors for the given dataset. + + Args: + dataset: Dataset to perform evaluation with. + label: Tag to add to end of monitor keys to identify dataset. + + Returns: + OrderedDict of monitor values evaluated on dataset. + """ + data_mon_vals = OrderedDict([(key + label, 0.) 
for key + in self.data_monitors.keys()]) + for inputs_batch, targets_batch in dataset: + activations = self.model.fprop(inputs_batch, evaluation=True) + for key, data_monitor in self.data_monitors.items(): + data_mon_vals[key + label] += data_monitor( + activations[-1], targets_batch) + for key, data_monitor in self.data_monitors.items(): + data_mon_vals[key + label] /= dataset.num_batches + return data_mon_vals + + def get_epoch_stats(self): + """Computes training statistics for an epoch. + + Returns: + An OrderedDict with keys corresponding to the statistic labels and + values corresponding to the value of the statistic. + """ + epoch_stats = OrderedDict() + epoch_stats.update(self.eval_monitors(self.train_dataset, '(train)')) + if self.valid_dataset is not None: + epoch_stats.update(self.eval_monitors( + self.valid_dataset, '(valid)')) + return epoch_stats + + def log_stats(self, epoch, epoch_time, stats): + """Outputs stats for a training epoch to a logger. + + Args: + epoch (int): Epoch counter. + epoch_time: Time taken in seconds for the epoch to complete. + stats: Monitored stats for the epoch. + """ + logger.info('Epoch {0}: {1:.1f}s to complete\n {2}'.format( + epoch, epoch_time, + ', '.join(['{0}={1:.2e}'.format(k, v) for (k, v) in stats.items()]) + )) + + def train(self, num_epochs, stats_interval=5): + """Trains a model for a set number of epochs. + + Args: + num_epochs: Number of epochs (complete passes through trainin + dataset) to train for. + stats_interval: Training statistics will be recorded and logged + every `stats_interval` epochs. + + Returns: + Tuple with first value being an array of training run statistics + and the second being a dict mapping the labels for the statistics + recorded to their column index in the array. 
+ """ + start_train_time = time.time() + run_stats = [list(self.get_epoch_stats().values())] + with self.tqdm_progress(total=num_epochs) as progress_bar: + progress_bar.set_description("Experiment Progress") + for epoch in range(1, num_epochs + 1): + start_time = time.time() + self.do_training_epoch() + epoch_time = time.time()- start_time + if epoch % stats_interval == 0: + stats = self.get_epoch_stats() + self.log_stats(epoch, epoch_time, stats) + run_stats.append(list(stats.values())) + progress_bar.update(1) + finish_train_time = time.time() + total_train_time = finish_train_time - start_train_time + return np.array(run_stats), {k: i for i, k in enumerate(stats.keys())}, total_train_time + diff --git a/mlp/penalties.py b/mlp/penalties.py new file mode 100644 index 00000000..28764344 --- /dev/null +++ b/mlp/penalties.py @@ -0,0 +1,90 @@ +import numpy as np + +seed = 22102017 +rng = np.random.RandomState(seed) + + +class L1Penalty(object): + """L1 parameter penalty. + + Term to add to the objective function penalising parameters + based on their L1 norm. + """ + + def __init__(self, coefficient): + """Create a new L1 penalty object. + + Args: + coefficient: Positive constant to scale penalty term by. + """ + assert coefficient > 0., 'Penalty coefficient must be positive.' + self.coefficient = coefficient + + def __call__(self, parameter): + """Calculate L1 penalty value for a parameter. + + Args: + parameter: Array corresponding to a model parameter. + + Returns: + Value of penalty term. + """ + return self.coefficient * abs(parameter).sum() + + def grad(self, parameter): + """Calculate the penalty gradient with respect to the parameter. + + Args: + parameter: Array corresponding to a model parameter. + + Returns: + Value of penalty gradient with respect to parameter. This + should be an array of the same shape as the parameter. 
+ """ + return self.coefficient * np.sign(parameter) + + def __repr__(self): + return 'L1Penalty({0})'.format(self.coefficient) + + +class L2Penalty(object): + """L2 parameter penalty. + + Term to add to the objective function penalising parameters + based on their L2 norm. + """ + + def __init__(self, coefficient): + """Create a new L2 penalty object. + + Args: + coefficient: Positive constant to scale penalty term by. + """ + assert coefficient > 0., 'Penalty coefficient must be positive.' + self.coefficient = coefficient + + def __call__(self, parameter): + """Calculate L2 penalty value for a parameter. + + Args: + parameter: Array corresponding to a model parameter. + + Returns: + Value of penalty term. + """ + return 0.5 * self.coefficient * (parameter ** 2).sum() + + def grad(self, parameter): + """Calculate the penalty gradient with respect to the parameter. + + Args: + parameter: Array corresponding to a model parameter. + + Returns: + Value of penalty gradient with respect to parameter. This + should be an array of the same shape as the parameter. + """ + return self.coefficient * parameter + + def __repr__(self): + return 'L2Penalty({0})'.format(self.coefficient) \ No newline at end of file diff --git a/mlp/schedulers.py b/mlp/schedulers.py new file mode 100644 index 00000000..4f53e7ee --- /dev/null +++ b/mlp/schedulers.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +"""Training schedulers. + +This module contains classes implementing schedulers which control the +evolution of learning rule hyperparameters (such as learning rate) over a +training run. +""" + +import numpy as np + + +class ConstantLearningRateScheduler(object): + """Example of scheduler interface which sets a constant learning rate.""" + + def __init__(self, learning_rate): + """Construct a new constant learning rate scheduler object. + + Args: + learning_rate: Learning rate to use in learning rule. 
+ """ + self.learning_rate = learning_rate + + def update_learning_rule(self, learning_rule, epoch_number): + """Update the hyperparameters of the learning rule. + + Run at the beginning of each epoch. + + Args: + learning_rule: Learning rule object being used in training run, + any scheduled hyperparameters to be altered should be + attributes of this object. + epoch_number: Integer index of training epoch about to be run. + """ + learning_rule.learning_rate = self.learning_rate diff --git a/notebooks/Coursework_2_Pytorch_Introduction.ipynb b/notebooks/Coursework_2_Pytorch_Introduction.ipynb new file mode 100644 index 00000000..3c79a18d --- /dev/null +++ b/notebooks/Coursework_2_Pytorch_Introduction.ipynb @@ -0,0 +1,665 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to PyTorch \n", + "\n", + "## Introduction\n", + "Pytorch is a modern, intuitive, Pythonic and fast framework for building differentiable graphs. Neural networks, as it happens, are a type of acyclic differentiable graph, making PyTorch a convenient framework to use, should you wish to build (potentially) complicated deep neural networks fairly easily.\n", + "\n", + "## MLP package vs Pytorch\n", + "**Student**: Why do I have to learn to use PyTorch now? I've spent all this time working on the MLP framework. Was that a waste of time?\n", + "\n", + "**TA**: Pytorch is everything the MLP package is, and more. It's faster, cleaner and far more up to date with modern deep learning advances, meaning it is easy to tailor to experiments you may wish to run. Since it is one of the main deep learning frameworks being used by industry and research alike, it conforms to the expectation of real users like researchers and engineers. The result is that PyTorch is (and continues to become) a robust and flexible package. Coming to grips with PyTorch now means that you'll be able to apply it to any future project that uses deep learning. 
\n", + "\n", + "Furthermore, the MLP framework was written in NumPy and your time developing this has taught you some fundamental implementation details of NNs: this could (and should) make future research directions more easy to think of and will also enable your debugging prowess. PyTorch was written to emulate NumPy as much as possible, so it will feel very familiar to you. The skills you have acquired are highly transferable (they generalize well, so not much overfitting there!).\n", + "\n", + "The developers of PyTorch try to make sure that the \"latest and greatest\" state-of-the-art research is included and implemented. If this is not the case, you will often find other people reproducing it. If you can't wait, you can reproduce it yourself and open source it (a great way to showcase your skills and get github likes).\n", + "\n", + "PyTorch has Autograd! Automatic differentiation. \"What is this?\" you may ask. Remember having to write all those backprop functions? Forget about it. Automatic differentiation allows you to backprop through any PyTorch operation you have used in your graph, by simply calling backward(). This [blog-post](https://jdhao.github.io/2017/11/12/pytorch-computation-graph/) explains how Pytorch's autograd works at an intuitive level.\n", + "\n", + "**Student**: Why did we even have to use the MLP package? Why did we even bother if such awesome frameworks are available?\n", + "\n", + "**TA**: The purpose of the MLP package was not to allow you to build fast deep learning systems. Instead, it was to help teach you the low level mechanics and sensitivities of building a deep learning system. Building this enabled you to dive deep into how to go about building a deep learning framework from scratch. The intuitions you have gained from going through your assignments and courseworks allow you to see deeper in what makes or breaks a deep learning system, at a level few people actually care to explore. 
You are no longer restricted to the higher level modules provided by Pytorch/TensorFlow. \n", + "\n", + "If, for example, a new project required you to build something that does not exist in PyTorch/TensorFlow, or otherwise modify existing modules in a way that requires understanding and intuitions on backpropagation and layer/optimizer/component implementation, you would be able to do it much more easily than others who did not. You are now equipped to understand differentiable graphs, the chain rule, numerical errors, debugging at the lowest level and deep learning system architecture. \n", + "\n", + "By trying to implement your modules in an efficient way, you have also become aware of how to optimize a system for efficiency, and gave you intuitions on how one could further improve such a system (parallelization of implementations). \n", + "\n", + "Finally, the slowness of CPU training has allowed you to understand just how important modern GPU acceleration is, for deep learning research and applications. By coming across a large breadth of problems and understanding their origins, you will now be able to both anticipate and solve future problems in a more comprehensive way than someone who did not go through the trouble of implementing the basics from scratch. \n", + "\n", + "\n", + "\n", + "## Getting Started\n", + "\n", + "**Student**: So, how is the learning curve of Pytorch? How do I start?\n", + "\n", + "**TA**: You can start by using this notebook on your experiments, it should teach you quite a lot on how to properly use PyTorch for basic conv net training. You should be aware of the [official pytorch github](https://github.com/pytorch/pytorch), the [pytorch official documentation page](https://pytorch.org/docs/stable/nn.html) and the [pytorch tutorials page](https://pytorch.org/tutorials/). 
\n", + "\n", + "Over the past year, nearly all students using PyTorch and Tensorflow on MLP and on projects found it easier and faster to get up to speed with PyTorch. In fact, I was a TensorFlow user myself, and learning TensorFlow was much more challenging than PyTorch. Mainly because TensorFlow has its own way of 'thinking' about how you build a graph and execute operations - whereas PyTorch is dynamic and works like NumPy, hence is more intuitive. If you were able to work well with the MLP package, you'll be up and running in no time. \n", + "\n", + "**Student**: OK, so how fast is pytorch compared to MLP?\n", + "\n", + "**TA**: On the CPU side of things, you'll find pytorch at least 5x faster than the MLP framework (about equal for fully connected networks, but much faster for more complicated things like convolutions - unless you write extremely efficient convolutional layer code), and if you choose to use GPUs, either using MS Azure, Google Cloud or our very own MLP Cluster (available for next semester), you can expect, depending on implementation and hardware an approximate 25-70x speed ups, compared to the CPU performance of pytorch. Yes, that means an experiment that would run overnight, now would only require about 15 minutes.\n", + "\n", + "**Student**: Ahh, where should I go to ask more questions?\n", + "\n", + "**TA**: As always, start with a Google/DuckDuckGo search, then have a look at the PyTorch Github and PyTorch docs, and if you can't find the answer come to Piazza and the lab sessions. We will be there to support you.\n", + "\n", + "\n", + "#### Note: The code in this jupyter notebook is to introduce you to pytorch and allow you to play around with it in an interactive manner. However, to run your experiments, you should use the Pytorch experiment framework located in ```pytorch_mlp_framework/```. 
Instructions on how to use it can be found in ```notes/pytorch-experiment-framework.md``` along with the comments and documentation included in the code itself." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Imports and helper functions\n", + "\n", + "First, let's import the packages necessary for our tutorial" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "metadata": {}, + "outputs": [], + "source": [ + "from torch import nn\n", + "from copy import deepcopy\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "import torch.nn.functional as F\n", + "import torch.backends.cudnn as cudnn\n", + "import torchvision\n", + "import tqdm\n", + "import os\n", + "import mlp.data_providers as data_providers\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's write a helper function for plotting" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "plt.style.use('ggplot')\n", + "\n", + "def plot_stats_in_graph(total_losses, y_axis_label, x_axis_label):\n", + " \n", + " # Plot the change in the validation and training set error over training.\n", + " fig_1 = plt.figure(figsize=(8, 4))\n", + " ax_1 = fig_1.add_subplot(111)\n", + " for k in total_losses.keys():\n", + " if \"loss\" in k:\n", + " ax_1.plot(np.arange(len(total_losses[k])), total_losses[k], label=k)\n", + " ax_1.legend(loc=0)\n", + " ax_1.set_xlabel(x_axis_label)\n", + " ax_1.set_ylabel(y_axis_label)\n", + " \n", + "\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basics: What is a tensor?\n", + "\n", + "In numpy we used arrays, whereas in pytorch we use tensors. 
Tensors are basically multi-dimensional arrays, that can also automatically compute backward passes, and thus gradients, as well as store data to be used at any point in our pytorch pipelines." + ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([ 5., 1., 10.]) tensor(5.3333) tensor(3.6818) \n", + " [ 5. 1. 10.] 5.3333335 3.6817868\n" + ] + } + ], + "source": [ + "data_pytorch = torch.Tensor([5., 1., 10.]).float()\n", + "data_numpy = np.array([5., 1., 10]).astype(np.float32)\n", + "\n", + "print(data_pytorch, data_pytorch.mean(), data_pytorch.std(unbiased=False), '\\n',\n", + " data_numpy, data_numpy.mean(), data_numpy.std())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Tensors have a rich support for a variety of operations, for more information look at the official pytorch [documentation page](https://pytorch.org/docs/stable/torch.html#torch.std)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basics: A simple pytorch graph of operations\n", + "\n", + "Pytorch automatically tracks the flow of data through operations without requiring explicit instruction to do so. 
\n", + "For example, we can easily compute the grads wrt to a variable **a** (which is initialized with requires grad = True to let the framework know that we'll be requiring the grads of that variable) by simple calling .backward() followed by .grad:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 105, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[[[0.0016, 0.0018, 0.0013, ..., 0.0024, 0.0022, 0.0012],\n", + " [0.0017, 0.0027, 0.0025, ..., 0.0022, 0.0016, 0.0023],\n", + " [0.0017, 0.0023, 0.0020, ..., 0.0012, 0.0019, 0.0027],\n", + " ...,\n", + " [0.0020, 0.0011, 0.0018, ..., 0.0025, 0.0028, 0.0026],\n", + " [0.0022, 0.0021, 0.0016, ..., 0.0018, 0.0016, 0.0024],\n", + " [0.0017, 0.0023, 0.0022, ..., 0.0025, 0.0024, 0.0022]],\n", + "\n", + " [[0.0021, 0.0025, 0.0019, ..., 0.0017, 0.0022, 0.0026],\n", + " [0.0024, 0.0020, 0.0020, ..., 0.0025, 0.0018, 0.0017],\n", + " [0.0025, 0.0023, 0.0019, ..., 0.0017, 0.0024, 0.0013],\n", + " ...,\n", + " [0.0027, 0.0014, 0.0022, ..., 0.0015, 0.0012, 0.0021],\n", + " [0.0030, 0.0019, 0.0025, ..., 0.0029, 0.0027, 0.0032],\n", + " [0.0021, 0.0024, 0.0021, ..., 0.0019, 0.0018, 0.0021]],\n", + "\n", + " [[0.0019, 0.0026, 0.0024, ..., 0.0029, 0.0023, 0.0023],\n", + " [0.0016, 0.0017, 0.0021, ..., 0.0023, 0.0016, 0.0022],\n", + " [0.0021, 0.0026, 0.0023, ..., 0.0019, 0.0021, 0.0021],\n", + " ...,\n", + " [0.0011, 0.0026, 0.0020, ..., 0.0017, 0.0020, 0.0021],\n", + " [0.0022, 0.0025, 0.0024, ..., 0.0027, 0.0019, 0.0017],\n", + " [0.0015, 0.0020, 0.0015, ..., 0.0019, 0.0024, 0.0022]]],\n", + "\n", + "\n", + " [[[0.0019, 0.0026, 0.0030, ..., 0.0017, 0.0020, 0.0030],\n", + " [0.0012, 0.0029, 0.0026, ..., 0.0020, 0.0024, 0.0019],\n", + " [0.0021, 0.0019, 0.0024, ..., 0.0032, 0.0022, 0.0020],\n", + " ...,\n", + " [0.0021, 0.0023, 0.0020, ..., 0.0024, 0.0020, 0.0019],\n", + " [0.0021, 0.0020, 0.0026, ..., 0.0024, 0.0017, 0.0022],\n", + " [0.0019, 0.0022, 0.0021, 
..., 0.0024, 0.0023, 0.0024]],\n", + "\n", + " [[0.0019, 0.0027, 0.0015, ..., 0.0027, 0.0020, 0.0023],\n", + " [0.0020, 0.0025, 0.0021, ..., 0.0020, 0.0020, 0.0020],\n", + " [0.0019, 0.0017, 0.0019, ..., 0.0019, 0.0018, 0.0025],\n", + " ...,\n", + " [0.0024, 0.0022, 0.0026, ..., 0.0013, 0.0020, 0.0026],\n", + " [0.0023, 0.0017, 0.0021, ..., 0.0024, 0.0018, 0.0026],\n", + " [0.0017, 0.0019, 0.0023, ..., 0.0020, 0.0020, 0.0024]],\n", + "\n", + " [[0.0018, 0.0030, 0.0020, ..., 0.0024, 0.0028, 0.0019],\n", + " [0.0026, 0.0023, 0.0026, ..., 0.0023, 0.0022, 0.0022],\n", + " [0.0023, 0.0024, 0.0013, ..., 0.0025, 0.0020, 0.0027],\n", + " ...,\n", + " [0.0024, 0.0018, 0.0024, ..., 0.0012, 0.0021, 0.0023],\n", + " [0.0019, 0.0016, 0.0016, ..., 0.0024, 0.0019, 0.0021],\n", + " [0.0029, 0.0020, 0.0018, ..., 0.0022, 0.0021, 0.0021]]],\n", + "\n", + "\n", + " [[[0.0020, 0.0022, 0.0014, ..., 0.0013, 0.0019, 0.0025],\n", + " [0.0020, 0.0023, 0.0021, ..., 0.0021, 0.0017, 0.0019],\n", + " [0.0023, 0.0024, 0.0021, ..., 0.0024, 0.0024, 0.0028],\n", + " ...,\n", + " [0.0025, 0.0018, 0.0017, ..., 0.0024, 0.0014, 0.0023],\n", + " [0.0029, 0.0026, 0.0024, ..., 0.0030, 0.0025, 0.0022],\n", + " [0.0018, 0.0017, 0.0025, ..., 0.0024, 0.0024, 0.0027]],\n", + "\n", + " [[0.0021, 0.0021, 0.0020, ..., 0.0020, 0.0017, 0.0025],\n", + " [0.0021, 0.0018, 0.0014, ..., 0.0019, 0.0014, 0.0018],\n", + " [0.0027, 0.0023, 0.0023, ..., 0.0024, 0.0023, 0.0030],\n", + " ...,\n", + " [0.0025, 0.0023, 0.0016, ..., 0.0028, 0.0020, 0.0021],\n", + " [0.0032, 0.0021, 0.0018, ..., 0.0024, 0.0021, 0.0030],\n", + " [0.0025, 0.0021, 0.0011, ..., 0.0019, 0.0021, 0.0022]],\n", + "\n", + " [[0.0018, 0.0021, 0.0016, ..., 0.0022, 0.0019, 0.0018],\n", + " [0.0023, 0.0031, 0.0017, ..., 0.0026, 0.0024, 0.0023],\n", + " [0.0020, 0.0022, 0.0013, ..., 0.0021, 0.0028, 0.0024],\n", + " ...,\n", + " [0.0018, 0.0013, 0.0023, ..., 0.0021, 0.0021, 0.0019],\n", + " [0.0025, 0.0005, 0.0016, ..., 0.0021, 0.0017, 0.0015],\n", + " [0.0026, 
0.0021, 0.0012, ..., 0.0021, 0.0018, 0.0021]]],\n", + "\n", + "\n", + " ...,\n", + "\n", + "\n", + " [[[0.0014, 0.0020, 0.0025, ..., 0.0020, 0.0016, 0.0021],\n", + " [0.0025, 0.0022, 0.0020, ..., 0.0018, 0.0017, 0.0025],\n", + " [0.0021, 0.0016, 0.0020, ..., 0.0021, 0.0023, 0.0025],\n", + " ...,\n", + " [0.0025, 0.0016, 0.0029, ..., 0.0024, 0.0022, 0.0024],\n", + " [0.0015, 0.0028, 0.0024, ..., 0.0020, 0.0017, 0.0021],\n", + " [0.0027, 0.0022, 0.0018, ..., 0.0025, 0.0022, 0.0019]],\n", + "\n", + " [[0.0027, 0.0024, 0.0019, ..., 0.0026, 0.0019, 0.0013],\n", + " [0.0029, 0.0019, 0.0021, ..., 0.0027, 0.0024, 0.0023],\n", + " [0.0022, 0.0013, 0.0018, ..., 0.0022, 0.0015, 0.0025],\n", + " ...,\n", + " [0.0020, 0.0017, 0.0020, ..., 0.0023, 0.0024, 0.0024],\n", + " [0.0024, 0.0021, 0.0021, ..., 0.0026, 0.0026, 0.0027],\n", + " [0.0022, 0.0019, 0.0030, ..., 0.0022, 0.0023, 0.0022]],\n", + "\n", + " [[0.0029, 0.0017, 0.0022, ..., 0.0021, 0.0023, 0.0020],\n", + " [0.0014, 0.0021, 0.0020, ..., 0.0024, 0.0019, 0.0019],\n", + " [0.0025, 0.0024, 0.0020, ..., 0.0021, 0.0020, 0.0020],\n", + " ...,\n", + " [0.0023, 0.0020, 0.0020, ..., 0.0028, 0.0021, 0.0025],\n", + " [0.0019, 0.0021, 0.0022, ..., 0.0021, 0.0025, 0.0020],\n", + " [0.0017, 0.0023, 0.0023, ..., 0.0028, 0.0021, 0.0014]]],\n", + "\n", + "\n", + " [[[0.0020, 0.0018, 0.0020, ..., 0.0025, 0.0021, 0.0024],\n", + " [0.0019, 0.0023, 0.0023, ..., 0.0021, 0.0014, 0.0018],\n", + " [0.0022, 0.0022, 0.0022, ..., 0.0025, 0.0027, 0.0025],\n", + " ...,\n", + " [0.0016, 0.0023, 0.0016, ..., 0.0020, 0.0025, 0.0012],\n", + " [0.0022, 0.0018, 0.0020, ..., 0.0026, 0.0020, 0.0019],\n", + " [0.0025, 0.0024, 0.0021, ..., 0.0022, 0.0022, 0.0026]],\n", + "\n", + " [[0.0029, 0.0022, 0.0022, ..., 0.0029, 0.0016, 0.0018],\n", + " [0.0021, 0.0021, 0.0023, ..., 0.0027, 0.0024, 0.0026],\n", + " [0.0012, 0.0023, 0.0025, ..., 0.0028, 0.0016, 0.0022],\n", + " ...,\n", + " [0.0021, 0.0020, 0.0017, ..., 0.0023, 0.0021, 0.0020],\n", + " [0.0027, 0.0012, 
0.0012, ..., 0.0023, 0.0015, 0.0017],\n", + " [0.0024, 0.0021, 0.0020, ..., 0.0011, 0.0018, 0.0020]],\n", + "\n", + " [[0.0017, 0.0019, 0.0022, ..., 0.0026, 0.0018, 0.0009],\n", + " [0.0021, 0.0020, 0.0028, ..., 0.0018, 0.0017, 0.0026],\n", + " [0.0023, 0.0020, 0.0022, ..., 0.0023, 0.0019, 0.0016],\n", + " ...,\n", + " [0.0023, 0.0023, 0.0019, ..., 0.0026, 0.0016, 0.0024],\n", + " [0.0019, 0.0022, 0.0015, ..., 0.0021, 0.0018, 0.0024],\n", + " [0.0017, 0.0018, 0.0028, ..., 0.0020, 0.0017, 0.0031]]],\n", + "\n", + "\n", + " [[[0.0017, 0.0021, 0.0019, ..., 0.0020, 0.0026, 0.0022],\n", + " [0.0023, 0.0021, 0.0017, ..., 0.0016, 0.0018, 0.0019],\n", + " [0.0015, 0.0020, 0.0022, ..., 0.0015, 0.0028, 0.0027],\n", + " ...,\n", + " [0.0020, 0.0019, 0.0015, ..., 0.0019, 0.0018, 0.0019],\n", + " [0.0025, 0.0026, 0.0021, ..., 0.0015, 0.0023, 0.0023],\n", + " [0.0016, 0.0019, 0.0022, ..., 0.0022, 0.0011, 0.0024]],\n", + "\n", + " [[0.0019, 0.0013, 0.0020, ..., 0.0015, 0.0026, 0.0027],\n", + " [0.0022, 0.0017, 0.0022, ..., 0.0016, 0.0017, 0.0023],\n", + " [0.0028, 0.0026, 0.0013, ..., 0.0029, 0.0026, 0.0017],\n", + " ...,\n", + " [0.0028, 0.0018, 0.0021, ..., 0.0025, 0.0017, 0.0022],\n", + " [0.0026, 0.0016, 0.0019, ..., 0.0026, 0.0016, 0.0019],\n", + " [0.0020, 0.0015, 0.0021, ..., 0.0027, 0.0027, 0.0011]],\n", + "\n", + " [[0.0028, 0.0024, 0.0025, ..., 0.0020, 0.0026, 0.0020],\n", + " [0.0027, 0.0022, 0.0013, ..., 0.0021, 0.0027, 0.0026],\n", + " [0.0018, 0.0016, 0.0024, ..., 0.0020, 0.0022, 0.0024],\n", + " ...,\n", + " [0.0026, 0.0017, 0.0020, ..., 0.0024, 0.0021, 0.0012],\n", + " [0.0019, 0.0022, 0.0020, ..., 0.0025, 0.0028, 0.0019],\n", + " [0.0023, 0.0019, 0.0018, ..., 0.0017, 0.0021, 0.0020]]]])\n" + ] + } + ], + "source": [ + "a = torch.randn((32, 3, 14, 14), requires_grad=True)\n", + "b = torch.ones((32, 3, 14, 14)) * 5\n", + "\n", + "result_addition = a + b\n", + "result_double = result_addition * 2\n", + "result_square = result_double ** 2\n", + "result_mean = 
result_square.mean()\n", + "\n", + "loss = result_mean\n", + "\n", + "loss.backward()\n", + "\n", + "print(a.grad)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Student**: Ok, so we can build graphs, what about neural networks? Are there any pre-built layers? How do we train things? How do we define parameters and biases for our models? \n", + "\n", + "**TA**: Don't rush. Let's take it step by step. Let's look at nn.Parameters first.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**TA**: In Pytorch all learnable components are created using the nn.Parameter class. That class, automatically tracks all gradients, and allows quick and easy updates in a given graph.\n", + "\n", + "**Note**: np.dot for a single batch going to a single 2D weight matrix is called using F.linear in Pytorch.\n", + "\n", + "**Further Note**: There also exist ParameterDicts for dictionaries of parameters, and ParameterLists when you define a list of parameters for part of your model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 106, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([16, 32])\n", + "current loss tensor(-0.0286, grad_fn=)\n", + "current loss tensor(-0.0366, grad_fn=)\n", + "current loss tensor(-0.0524, grad_fn=)\n", + "current loss tensor(-0.0762, grad_fn=)\n", + "current loss tensor(-0.1079, grad_fn=)\n", + "current loss tensor(-0.1475, grad_fn=)\n", + "current loss tensor(-0.1950, grad_fn=)\n", + "current loss tensor(-0.2505, grad_fn=)\n", + "current loss tensor(-0.3139, grad_fn=)\n", + "current loss tensor(-0.3852, grad_fn=)\n" + ] + } + ], + "source": [ + "weights = nn.Parameter(torch.randn(32, 32), requires_grad=True)\n", + "inputs = torch.randn(16, 32)\n", + "outputs = F.linear(inputs, weights)\n", + "learning_rate = 0.1\n", + "\n", + "print(outputs.shape)\n", + "\n", + "for i in range(10):\n", + " outputs = F.linear(inputs, weights)\n", + " loss = torch.mean(outputs)\n", + " loss.backward()\n", + " weights.data = weights.data - learning_rate * weights.grad\n", + " print('current loss', loss)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## nn.Modules and why they are important\n", + "\n", + "Pytorch implements a class called the nn.Module class. The nn.Module class automatically detects any nn.Parameter, nn.ParameterList or nn.ParameterDict and adds it to a collection of parameters which can be easily accessed using .parameters and/or .named_parameters().\n", + "\n", + "Let's look at an example:\n", + "\n", + "Let's build a fully connected layer followed by an activation function that can be preselected, similar to coursework 1. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 124, + "metadata": {}, + "outputs": [], + "source": [ + "class LinearLayerWithActivation(nn.Module):\n", + " def __init__(self, input_shape, num_units, bias=False, activation_type=nn.ReLU()):\n", + " super(LinearLayerWithActivation, self).__init__()\n", + " self.activation_type = activation_type\n", + " self.weights = nn.Parameter(torch.empty(size=(num_units, input_shape[1]), requires_grad=True))\n", + " \n", + " nn.init.normal_(self.weights)\n", + " \n", + " if bias:\n", + " self.bias = nn.Parameter(torch.zeros(num_units), requires_grad=True)\n", + " else:\n", + " self.bias = None\n", + " \n", + " def forward(self, x):\n", + " out = F.linear(x, self.weights, self.bias)\n", + " out = self.activation_type.forward(out)\n", + " return out\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 175, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Parameters with name weights and shape torch.Size([512, 128])\n", + "Parameters with name bias and shape torch.Size([512])\n" + ] + } + ], + "source": [ + "x = torch.arange(16*128).view(16, 128).float()\n", + "y = torch.arange((16))\n", + "\n", + "fcc_net = LinearLayerWithActivation(input_shape=x.shape, num_units=512, bias=True, activation_type=nn.Identity())\n", + "optimizer = optim.Adam(fcc_net.parameters(), amsgrad=False, weight_decay=0.0)\n", + "\n", + "\n", + "for name, params in fcc_net.named_parameters():\n", + " print('Parameters with name', name, 'and shape', params.shape)\n", + "\n", + "metric_dict = {'losses': []} \n", + " \n", + "for i in range(50):\n", + "\n", + " out = fcc_net.forward(x)\n", + " loss = F.cross_entropy(out, y)\n", + " fcc_net.zero_grad() #removes grads of previous step\n", + " optimizer.zero_grad() #removes grads of previous step\n", + " loss.backward() #compute gradients of current step\n", + " optimizer.step() #update step\n", + " 
metric_dict['losses'].append(loss.detach().cpu().numpy()) #.detach: Copies the value of the loss \n", + "# and removes it from the graph, \n", + "# .cpu() sends to cpu, and \n", + "# numpy(), converts it to numpy format." + ] + }, + { + "cell_type": "code", + "execution_count": 162, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAf0AAAEJCAYAAACez/6HAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO3dd3hUZdrH8e9zJgkhDIEUEGlKKCpI00RCTYCgIFWaDVfWXRUjouAqYFcQcRUpguIiYEVFBBQEkRBCEVlDVVEEWSyICMlEILSQzPP+MZqVV3ADDjMpv891eV3kMHPmPrcJd55urLUWERERKfWcYAcgIiIigaGiLyIiUkao6IuIiJQRKvoiIiJlhIq+iIhIGaGiLyIiUkaEBDuAQNi9e7df7xcbG0tWVpZf71kWKY/+o1z6j3LpP8ql/5xuLqtXr37S62rpi4iIlBEq+iIiImWEir6IiEgZUSbG9EVEpHSx1nL06FG8Xi/GmGCHc9b99NNPHDt27IRr1locxyE8PLzIOVDRFxGREufo0aOEhoYSElI2ylhISAgul+t31/Pz8zl69Cjly5cv0n3UvS8iIiWO1+stMwX/j4SEhOD1eov++rMYywkWLVrEsmXLsNbSsWNHunbtSm5uLuPHj2ffvn1UqVKFoUOH4na7sdYyc+ZMNm7cSLly5UhNTSUuLg6AjIwM5s6dC0Dv3r1JTk4O1COIiEgxURa69IvqdHIRkJb+d999x7JlyxgzZgxPPfUUGzZs4Mcff2T+/Pk0btyYSZMm0bhxY+bPnw/Axo0b2bNnD5MmTeKWW27hxRdfBCA3N5c5c+YwZswYxowZw5w5c8jNzQ3EIwBgvQV435pOwU/+XfcvIiISCAEp+j/88AP169enXLlyuFwuLrroIj755BMyMzNJSkoCICkpiczMTADWrVtHu3btMMbQoEEDDh06RE5ODps2baJJkya43W7cbjdNmjRh06ZNgXiEXx7kO+yqD8kacj3eJfOwBQWB+2wRESlW6tevH+wQTltAuvdr1arFm2++ycGDBwkLC2Pjxo3UrVuX/fv3ExUVBUBUVBQHDhwAwOPxEBsbW/j+mJgYPB4PHo+HmJiYwuvR0dF4PJ7ffV5aWhppaWkAjB079oR7/SmxsRRMnkXui+M5OmcmrvWriUwdQWi9i/xz/zImJCTEf/9vyjjl0n+US/85m7n86aefisWYfiBjONVnlStXrsh5Dki0NWvWpGfPnowePZrw8HDOO+88HOfUnQzW2t9dO9WYxcmup6SkkJKSUvi1f7eBdBEz/Anyli4gf9a/8Nx7M6ZjN0zP6zHhRZs9KT7aotN/lEv/US7952zm8tixYyedzR5o+fn5WGsZPXo0y5cvxxjDkCFD6NmzJz/99BO33XYbBw8epKCggCeeeIL4+HjuvvtuPv30U4wxXH311dxyyy1888033H///WRnZ1O+fHmeeuop6tWrx4IFCxg/fjwul4uKFSsWzmn7rWPHjv0uz6fahjdgv6J06NCBDh06ADBr1ixiYmKoVKkSOTk5REVFkZOTQ2RkJOBr2f/2AbKzs4mKiiI6Opovvvii8LrH46Fhw4aBeoRCxhjMJa1wLmyKnfcqdtkC7IY1ONfdhmmaEPB
4RETKMu+b07Df7/TrPU2tOjjX3Fyk1y5atIgtW7awdOlSPB4PV155JYmJicybN4+kpCTuvPNOCgoKOHLkCFu2bGHPnj2kp6cDsH//fgDuvfdexo4dS1xcHBs2bGDkyJG8/fbbTJgwgddff51atWqRnZ39p58rYEv2fn2wrKwsPvnkE1q3bk18fDwrVqwAYMWKFSQk+ApmfHw8K1euxFrLtm3biIiIICoqimbNmrF582Zyc3PJzc1l8+bNNGvWLFCP8DsmogLO9YNwhj8J4RF4J4/CO/VJ7M+/H3IQEZHS6ZNPPqFXr164XC6qVKlCYmJiYX2aPXs248aN48svv8TtdlO7dm2+++47HnjgAZYvX07FihU5dOgQ69ev59Zbb6VTp04MHz6cvXv3Ar56OHToUF599VUK/DCPLGAt/XHjxnHw4EFCQkL429/+htvtplevXowfP5709HRiY2MZNmwYAM2bN2fDhg0MGTKEsLAwUlNTAXC73fTp04eRI0cC0LdvX9xud6Ae4ZRM3QtxHhyPXTIPu/At7BcbMb3/gmnXGfMHwxgiIvLnFbVFfracbEgaIDExkXfeeYdly5Zx5513MmjQIPr168fSpUvJyMjgpZdeYsGCBTz66KNERkaydOnS393jySefZMOGDSxfvpzLL7+cDz/8kOjo6DOO1dhTRVuKBPJoXfvTbryvPw9fboa4C3BuuB1T83y/fn5pobFT/1Eu/Ue59J+zmcvDhw8TERFxVu5dVPXr12f79u0sWrSI1157jVdffZWff/6ZLl26sHDhQvLy8qhWrRohISFMmzaN77//nrvuuovQ0FAqVqzI559/ztChQ1m6dCk9evTg5ptvpnv37lhr+eKLL2jUqBHffPMN559/PiEhIXTo0IFnnnmGiy+++IQ4TpaLoI/plxXmnOo4Qx/D/jsD+9Z0vKOHYjr1wnS7BlOuXLDDExERP+vSpQvr16+nU6dOGGO4//77qVq1KrNnz2bq1KmEhIRQoUIFJk6cyI8//siwYcMKd9H7ted68uTJjBw5kokTJ5Kfn0/Pnj1p1KgRo0ePZufOnVhradOmDY0aNfpTsaqlfwaK+turzT2AnfMS9qM0iD0H5/pBmIsv9WssJZlaVP6jXPqPcuk/pb2lH0ghISHk5+ef9O9Op6WvAeezyLgjcQYOwfnHGAgJxTvxUbz/egq7PyfYoYmISBmkoh8A5oKLcR6aiOl5HXbjWrwPpuLNWIT1akc/EREJHBX9ADGhoTjdrsF5eBKcXw/7+lS8T9yL/XZHsEMTESlxysDIdJGdTi5U9APMVKuBM/QxzN/vBs8+vI/f7dtY4sjhYIcmIlJiOI5zyjHusiQ/P/8Pd7j9/zR7PwiMMZgWSdjGl2LnvYZNX4hd9xHm6r9j4lvryEgRkf8hPDyco0ePcuzYsTLxb2a5cuU4duzYCdestTiOQ3h4eJHvo6IfRCbCjbl+ELZVB7yvPYf91z+xHzXHuW4Qpuq5wQ5PRKTYMsZQvnzZOe/EXysh1L1fDJg6DXDuG4e55mbYsRXvw4PxLngTezwv2KGJiEgpoqJfTBiXC6djd5zHnsM0a4F9bxbeR+7AbtkY7NBERKSUUNEvZkxUDM6t9+IMfRQweCc87DvEx6PNQkRE5M9R0S+mTMPmOI88i+l5PfbTTLwP3Y73w/lYzVYVEZEzpKJfjPnW9l+N8+hkaNAI+/YMvKOHYrd/EezQRESkBFLRLwFMlWo4dzyIk3ofHDmE958j8M6YgD3wc7BDExGREkRL9koIYww0T8Rp2Az7/lvYD9/Fbv43ptcNmKQrMI4r2CGKiEgxp5Z+CWPKheP0vhHn4YlQuy521lS8j/8Du2NrsEMTEZFiTkW/hDLn1sIZNgpzyz1wIAfv2HvxvjIZe/BAsEMTEZFiSt37JZgxBpPQ1red74I3sWnvYTd8jOl9A6bN5ZjT2I9ZRERKP1WFUsCER+D0uwnnoYlQozb21efwPnEPdue2YIc
mIiLFiIp+KWJqnIfzjzGYvw2DnCy8Y/6B9+VnsQf3Bzs0EREpBtS9X8oYYzCJydiml2EXvoVd9h52wxpMz+sxSV0wLs3yFxEpq9TSL6VM+Qicfn/FeXgSnFcP+8a/8I66C7vt82CHJiIiQaKiX8qZc2vhDH0M57YRcOQw3qfuwzttHDYnO9ihiYhIgKl7vwwwxsAlrXAaXYr94B3ff5v/jbmyH6ZTT0xoWLBDFBGRAFBLvwwx5crh9LwO57EpcFEz7LxX8T48GLtxLdbaYIcnIiJnmYp+GWSqVMN1+304Qx+D0DC8z43BO/4h7A/fBjs0ERE5i1T0yzDTsBnOQxMx190K3+7A+9ideGe9gD10MNihiYjIWaAx/TLOuFyY9l2xCW2x783CZizGfrIS0/M6TLvOWuInIlKKqKUvABh3JM51g3AemgA1z8fOesG3xO+LTcEOTURE/ERFX05gap6Pc/donNtGQt4xvOMfomDyaOxPu4MdmoiI/Ekq+vI7xhjMJS1xHp2C6XMjfPUZ3ocH4509HXs4N9jhiYjIGQrYmP7ChQtJT0/HGEOtWrVITU3l559/ZsKECeTm5lKnTh3uuOMOQkJCOH78OJMnT+Y///kPFStW5K677qJq1aoAzJs3j/T0dBzH4a9//SvNmjUL1COUOSY0FNO5D7ZlB+z813yn+H28HNNrAKZtJ4yj8X4RkZIkIC19j8fD4sWLGTt2LOPGjcPr9bJmzRpee+01unbtyqRJk6hQoQLp6ekApKenU6FCBZ599lm6du3K66+/DsCuXbtYs2YNzzzzDPfffz/Tp0/H6/UG4hHKNFMpCufGO3AeeAaq18K+9hzeUUOxX24OdmgiInIaAta97/V6ycvLo6CggLy8PCpXrsyWLVtITEwEIDk5mczMTADWrVtHcnIyAImJiXz++edYa8nMzKRVq1aEhoZStWpVqlWrxtdffx2oRyjzTO26OP8YgzPoly19n3mQgimPa7xfRKSECEj3fnR0NN27d+e2224jLCyMpk2bEhcXR0REBK5floRFR0fj8XgAX89ATEwMAC6Xi4iICA4ePIjH46F+/fon3PfX90hgGGPg0lY4TeJ93f3vv4334cGYjt0wXftjItzBDlFERE4hIEU/NzeXzMxMpkyZQkREBM888wybNp16KdjJtoQ1xhR5q9i0tDTS0tIAGDt2LLGxsWcW+CmEhIT4/Z4l0g2DKOjWj9xZ/+Lo0ndhbQYVrr2Z8p26Y1z/+1tLefQf5dJ/lEv/US79x1+5DEjR/+yzz6hatSqRkZEAtGjRgq+++orDhw9TUFCAy+XC4/EQHR0NQExMDNnZ2cTExFBQUMDhw4dxu92F13/12/f8VkpKCikpKYVfZ2Vl+fV5YmNj/X7PEu3qm3FadsD71nQOvvAUBxfOxul/E6Zh8z98m/LoP8ql/yiX/qNc+s/p5rJ69eonvR6QMf3Y2Fi2b9/OsWPHsNby2WefUbNmTRo1asTatWsByMjIID4+HoBLL72UjIwMANauXUujRo0wxhAfH8+aNWs4fvw4e/fu5ccff6RevXqBeAT5H3zj/Y/7jvDNO4Z3/MMUPDsKu2dXsEMTEZFfGBug49Vmz57NmjVrcLlcnH/++QwaNAiPx/O7JXuhoaHk5eUxefJkdu7cidvt5q677uKcc84BYO7cuSxfvhzHcRg4cCDNm/9xaxJg927/TjTTb69/zB4/jl32Hvb92XA8D5PUBdP9Gow78oTXKY/+o1z6j3LpP8ql//irpR+woh9MKvrBYQ/87NvPf+WHUL48pts1mPZXYkJCAeXRn5RL/1Eu/Ue59J8S1b0vZZOJrIwzIBXn4YlwfgPs7Ol4Hx6M3bi2yJMyRUTEf1T05awzNc7DNfRRnCEPgysE73Nj8I57gOP/+SrYoYmIlCk6WlcCxjS+FKdhM+zKJdj3Xsfzj5swLTv4tvWNigl2eCIipZ6KvgSUcbkw7a/EtmhHePpCDr8/G7tuNea
KqzBX9MaUCw92iCIipZa69yUoTISbigMH4zz2HKZxPHbBm3jvH4R39VKstyDY4YmIlEoq+hJUpko1nEHDcYY/CTFVsC8/i3fUMOwXp96xUUREzoyKvhQLpt5FOCP+ibnlHjhyCO/4hyiY9Bj2x++DHZqISKmhoi/FhjEGJ6EtzqjnMH1uhK+/wPvIHXhfew57ICfY4YmIlHiayCfFjgkNw3Tug22dgl3wJnblB9i1KzCde2M69cKUKxfsEEVESiS19KXYMhUr4Vx3K84jk6FhU+y7r+N9YBDej5Zpsp+IyBlQ0Zdiz1SrgSv1Ppx7noCoGOxLE/GOGor9YmOwQxMRKVFU9KXEMA0a4Yx86pfJfod9J/lNeBi765tghyYiUiKo6EuJ8t/Jfs9j+v0Vdm7D+9hdeF+ahM3JDnZ4IiLFmibySYlkQkMxl1/lm+z3/mzs8vexmSsxKb18E/7KRwQ7RBGRYkctfSnRTIWKOP3/5tvZr2kL7KLZeO+/Fe/yRdj8/GCHJyJSrKjoS6lgqlTDueUenPvGwbk1sbOm4n3kDh3jKyLyGyr6UqqYOvVx/jEG5/b7wRjfMb7/HIndsTXYoYmIBJ3G9KXUMcZAsxY4jeOxq5di35uFd+y9cGkrnN5/wVStHuwQRUSCQkVfSi3jcmGSOmNbtMN+OB+7ZB7eTZ9gkrtgul6NqRgZ7BBFRAJKRV9KPRMegelxHbZdZ9+2vsvfx65ZhuncB5PSAxOmbX1FpGzQmL6UGaZyNM4NqTiPPAsNLsbOexXvA7dpW18RKTNU9KXMMefWwjX4AZx7xkDl6P9u6/v5Bs30F5FSTUVfyizT4OJftvW9F44dxTvxEbzjH8J+tyPYoYmInBUa05cyzRiDSWiDbd4Cu+ID7MI38Y4ehmmRjOl1PSamarBDFBHxGxV9EcCEhGI6dse2bI/94B1s2gLsutWYjt0wXfphKriDHaKIyJ+m7n2R3zARbpzeN+KMfh5zmW+pn/e+W/B+OA97PC/Y4YmI/Ckq+iInYaKr4Pz1TpyHJkBcA+zbM30z/deka6a/iJRYKvoif8DUrIPrzkdwho2CipWwMyf8MtN/vWb6i0iJo6IvUgTmoqY49z2NueWeX2b6P4r3mQex334d7NBERIpME/lEisg4DiahLbZ5InbFkv/O9E9o65vprz39RaSYU9EXOU2+mf7dsK06YJfMxS59F7thDaZNJ0y3azCVo4MdoojISanoi5whUz4C02sAtn1X7MK3sKuWYD9Ox3TsgencGxOhZX4iUrwEpOjv3r2b8ePHF369d+9e+vfvT1JSEuPHj2ffvn1UqVKFoUOH4na7sdYyc+ZMNm7cSLly5UhNTSUuLg6AjIwM5s6dC0Dv3r1JTk4OxCOInJKpFIW5fhC2U0/su7Owi+dgV3yAubIvpn1XHegjIsVGQCbyVa9enaeeeoqnnnqKJ598krCwMC677DLmz59P48aNmTRpEo0bN2b+/PkAbNy4kT179jBp0iRuueUWXnzxRQByc3OZM2cOY8aMYcyYMcyZM4fc3NxAPILI/2Sqnotz8904D06AuAuwc17Ce/8gvKs+xBZomZ+IBF/AZ+9/9tlnVKtWjSpVqpCZmUlSUhIASUlJZGZmArBu3TratWuHMYYGDRpw6NAhcnJy2LRpE02aNMHtduN2u2nSpAmbNm0K9COI/CFTOw7XnQ/j/GMMRMdiX5mM9+HB2HWrsV5vsMMTkTIs4GP6H330Ea1btwZg//79REVFARAVFcWBAwcA8Hg8xMbGFr4nJiYGj8eDx+MhJiam8Hp0dDQej+d3n5GWlkZaWhoAY8eOPeFe/hASEuL3e5ZFpT6PscnYVkkc+2QVua+/QMEL/yQk7gLcA24lrFkLjDF++6hSn8sAUi79R7n0H3/lMqBFPz8/n/Xr13Pdddf94etOtunJqf6BPNn1lJQUUlJSCr/Oyso6zUj/WGxsrN/vWRaVmTzWbYh94BnMv1eS/+7r/Pz
YMGhwMU7vv2DqXuiXjygzuQwA5dJ/lEv/Od1cVq9+8iXEAe3e37hxI3Xq1KFy5coAVKpUiZycHABycnKIjIwEfC373z5cdnY2UVFRREdHk52dXXjd4/EU9hSIFGfGceG0bI8z6nnMtbfAnl14x95LweTR2B++DXZ4IlJGBLTo/7ZrHyA+Pp4VK1YAsGLFChISEgqvr1y5Emst27ZtIyIigqioKJo1a8bmzZvJzc0lNzeXzZs306xZs0A+gsifYkJDcTp0w3n8BUyvAbDtc7yPDsE7/Rnsvj3BDk9ESrmAde8fO3aMTz/9lFtuuaXwWq9evRg/fjzp6enExsYybNgwAJo3b86GDRsYMmQIYWFhpKamAuB2u+nTpw8jR44EoG/fvrjdWgstJY8JL4/p2h+b3AW7+B1s+kJs5ipM2yswXftrgx8ROSuMLQOnhuzevduv99M4lX8oj/9lf872bfCzeim4XJgO3TGd+2AqFO2XWuXSf5RL/1Eu/adEjumLyMmZyjE4A1JxHnsO07wldslcvCNvxvv+bOyxo8EOT0RKCRV9kWLEVD0X5+934zw0ARo0ws5/De99t+BdthB7/HiwwxOREq7IRX/hwoV88803AGzbto3bbruNwYMHs23btrMVm0iZZWrWwTX4AZwR/4Rza2Hf/BfeBwbhXb1Uu/uJyBkrctF///33qVq1KgBvvPEG3bp1o3fv3rz00ktnKzaRMs/UvRDn7tE4Qx+FyMrYl5/F+/BgvJmrtLufiJy2Ihf9w4cPExERwZEjR/jmm2/o0qULHTp08PskORE5kTEG07A5zn1P46TeBy4X9l9P4R01FLs586SbWYmInEyRl+zFxMTw1Vdf8f3333PRRRfhOA6HDx/GcTQtQCQQjDHQPBGnaQL2k1XY92bhnTwK6l5I3o23w7nnBTtEESnmilz0BwwYwDPPPENISAh33303ABs2bKBevXpnLTgR+T3juDCJydj4Ntg1adgFb5Hz0B1wYROcXgP8trWviJQ+f2qdfn5+PuA7CKA40zr94kl59A97PI+IdavIffslOLgfGsfj9Lwec17dYIdWIun70n+US//x1zr9IlfrXbt24Xa7qVy5MkePHuW9997DcRy6d+9e7Iu+SGlmQsOo0P1qDjdvhV3+PvaDuXhHD4VLWuL0uA5TQ93+IuJT5AH5iRMncvjwYQBeeeUVvvzyS7Zt28a//vWvsxaciBSdCS+P06UvzhPTMN2vhS82+fb1nzYOu+eHYIcnIsVAkZvo+/bto3r16lhryczMZNy4cYSFhTF48OCzGZ+InCYTUQHT41psh67YD+dhly3ErluFSWyP6XY1pkq1YIcoIkFS5KIfGhrKkSNH2LVrFzExMURGRlJQUMBx7RImUiwZdySm943YlB6+Q31WfIBduxzTqqPvUJ/Yc4IdoogEWJGLfuvWrXnsscc4cuQInTt3BmDnzp2FG/aISPFkIqMwV/8de0Vv7Ae/FP+P0/9b/GP0MyxSVhS56A8cOJDNmzfjcrm4+OKLAd+64RtvvPGsBSci/mMqR2OuudlX/Be/jV31IXZNOqZNCubKfpjoKsEOUUTOstOadt+0aVOysrLYtm0b0dHR1K2rJUEiJY2JisFcNwjbuQ928RzsqqXYj9IwbS73Hecbo+IvUloVuejn5OQwYcIEtm/fjtvt5uDBgzRo0IA777yT6OjosxmjiJwFJroK5vrbfMV/0S8t/1UfYlp3xHTpqzF/kVKoyEV/2rRpnHfeeYwcOZLw8HCOHj3KG2+8wbRp0xg+fPjZjFFEziITUxVzw+3YK/tjP5iDXf1Ly79lB1+3v2b7i5QaRV6n/9VXX/GXv/yF8PBwAMLDwxkwYICO1hUpJUxMFZzrb8N5/F+YpC7YtRm+43xnTsT+pIO1REqDIhf9ChUqsGvXrhOu7d69m4iICL8HJSLBY6Jjca69xbfJT4du2MxVeB9MxfviOOyP3wc7PBH5E4rcvd+jRw9GjRp
Fhw4dqFKlCvv27SMjI4Orr776bMYnIkFiKkf7lvp17uPb5CdjMfaTlZhLWvmW+tWqE+wQReQ0Fbnop6SkUK1aNVavXs13331HVFQUgwcPZuvWrWczPhEJMlMpCtPvJmznvti0d7HpC7HrP4Kml+F0vRpTp36wQxSRIvpTp+wdP36cAQMG8NZbb/kzJr/TKXvFk/LoP4HMpT2U6yv8ae/B4Vxo1Byn29WYeg0D8vlnm74v/Ue59J+An7InIgJgKrgx3a/BduqBXb4Yu3Q+3idHwAWNcbr2hwubYIwJdpgichIq+iJyRkx4BKZLH2yHbthVS7BL5uJ95kGIuwDnyv7QJF7FX6SY+Z9F//PPPz/l3+Xn5/s1GBEpeUy5cpiUHtikLtg1y7CL5+CdPApq1sHp2g8uaYlxXMEOU0QoQtF//vnn//DvY2Nj/RaMiJRcJjQUk9QZ2zoF+8lK7OK38b7wT6hW07fD32XtMCHqXBQJpv/5EzhlypRAxCEipYQJCcG06oBNTMKu/xi7aDZ25gTsgjcwV/T2bfMbGhbsMEXKJP3aLSJnhXFcmIQ22PjW8Gkm3vdnY19/HrvwLczlvTBJnTHlwoMdpkiZoqIvImeVMca3pr9JAmz91Ff8356BXfw2pmN3TPtumAruYIcpUiao6ItIQBhj4KKmuC5qit2xFe+it7HvzsIumYdJvhLTqQcmMirYYYqUair6IhJwpu6FuO54EPv9TuziOdglc7HLFmDadMJccRUmpmqwQxQplVT0RSRoTK06mFvuwfa4DvvBO9iVS7ArP8BcloTp0gdzbq1ghyhSqgSs6B86dIipU6fy/fffY4zhtttuo3r16owfP559+/ZRpUoVhg4ditvtxlrLzJkz2bhxI+XKlSM1NZW4uDgAMjIymDt3LgC9e/cmOTk5UI8gImeJqVYDM3AItse12A/n+zb7WbscmifidOmLOV/7+4v4Q8CK/syZM2nWrBl33303+fn5HDt2jHnz5tG4cWN69erF/PnzmT9/PgMGDGDjxo3s2bOHSZMmsX37dl588UXGjBlDbm4uc+bMYezYsQCMGDGC+Ph43G5NAhIpDUx0Fcw1N2O79scuW4Bd/j7eDR/DRU1xruwHFzTWLn8if4ITiA85fPgwX375JR06dAAgJCSEChUqkJmZSVJSEgBJSUlkZmYCsG7dOtq1a4cxhgYNGnDo0CFycnLYtGkTTZo0we1243a7adKkCZs2bQrEI4hIAJmKlXB6DcAZOx3TdyD88C3ecQ/gfeIe7Ka1WK832CGKlEgBaenv3buXyMhInnvuOb799lvi4uIYOHAg+/fvJyrKN1s3KiqKAwcOAODxeE7Y6S8mJgaPx4PH4yEmJqbwenR0NB6P53efl5aWRlpaGgBjx471+66BISEh2onQD5RH/ynVubz+Fmy/GzmSvojD81+nYMoYXLXqUKH3AMLbdPL7Ln+lOrAO2jcAAB1HSURBVJcBplz6j79yGZCiX1BQwM6dO7npppuoX78+M2fOZP78+ad8/clO+z1Vl97JrqekpJCSklL4tb+PdtRxkf6hPPpPmchlfFts81aYzFUUfPAOByaO4sBrL/x3l7+wcn75mDKRywBRLv3HX0frBqR7PyYmhpiYGOrX903GSUxMZOfOnVSqVImcnBwAcnJyiIyMLHz9bx8uOzubqKgooqOjyc7OLrzu8XgKewpEpPQzLhdOYjLOQxNxBj8AlaOxs6biHfF3vIvnYA8fCnaIIsVaQIp+5cqViYmJYffu3QB89tln1KxZk/j4eFasWAHAihUrSEhIACA+Pp6VK1dirWXbtm1EREQQFRVFs2bN2Lx5M7m5ueTm5rJ582aaNWsWiEcQkWLEOA6m6WU4w5/E+ccYqB2HnfuKr/jPfQV7ICfYIYoUSwGbvX/TTTcxadIk8vPzqVq1KqmpqVhrGT9+POnp6cTGxjJs2DAAmjdvzoYNGxgyZAhhYWGkpqYC4Ha76dOnDyNHjgS
gb9++mrkvUoYZY+CCi3FdcDH22x2+jX4+eAe79F1MmxTM5VdhqlQLdpgixYaxJxtAL2V+7WHwF41T+Yfy6D/K5X/Zn3b7dvj7OB0KvJiENpjOfTC16hTp/cql/yiX/uOvMX3tyCcipYo5pzrmL4N9u/ylvYddsRj7yUq4+FKcLn2gfiOt9ZcyS0VfREolUzka03cg9sq+2IzF2LT38D51H9RpgHPFVdA8EeO4gh2mSECp6ItIqWYi3Jgr+2FTemDXLMN+OB/v1CehSjVMp16YVh0x5fyz3E+kuFPRF5EywYSVwyRfiW13BWz6N94l87CzpmLfex2T3BXT/kpMZOVghylyVqnoi0iZYhwXXNIKp3lL2PGlr/i//xb2g3cwrTqQ3/+vUC4i2GGKnBUq+iJSJhljoF5DXPUaYvfswi59F7smneyVS6BJAk5KD7iwiSb9Samioi8iZZ6pVhNzw+3YntdR/pMVHFr0Dt5nHoSadTCdemAS2mFCQ4MdpsifFpAd+URESgITGYX7mr/jPDkd85fB4C3AzpyId+Tf8S58C3vwQLBDFPlT1NIXEfl/TGgYpu3l2Dad4MtNeJe+i333deyitzGJyZiUHpjqtYMdpshpU9EXETkFYww0bI6rYXPsj9/7Nvv5eDl21YfQqDlOp17QsJnG/aXEUNEXESkCc24t37h/rxt8u/xlLMI74WGoXtvX8m+R5LfjfUXOFo3pi4icBlMxEqfb1ThPvIj5613gcmFfmYx3+N/wvvs6dr9O+JPiSy19EZEzYEJDMa06YFu2h22f+8b935/tW++f0M7X+q8dF+wwRU6goi8i8if4jvdtjOuCxr4T/pa9h12T7jvl74LGvvX+TeK1z78UCyr6IiJ+Ys6pjrluELbnAOzqpdj0hXinPO7b579jd0zrjphw7fYnwaOiLyLiZ6aCG3PFVdiUHrDxY7xp72HfnIZ993VMm06YDt0wsecEO0wpg1T0RUTOEuNyQXwbXPFtsP/5yrfkb9kCbNoCaN4Cp2MPqN9QS/4kYFT0RUQCwMRdgLnlHqxnIDZjEXbFErwbPobadX1d/wlttdWvnHVasiciEkAmugpO7xtx/jkTMyAVjudhZ07AO+JveBe8iT3wc7BDlFJMLX0RkSAw5cphkjpj210BX2zyjfu/Nwu7aDbmsiRMx26Y2nWDHaaUMir6IiJBZIyBRs1xNWqO/XEXNn2Bb8nfmmVQryFOx27QLBETon+u5c/Td5GISDFhzq2Juf427FU3YFen+bb6feGfUDkGk9wF0+4KTMVKwQ5TSjAVfRGRYsZEuDGX98KmdIfP1uNNX4id/xp24VuYy9qp61/OmIq+iEgxZRwXNL0MV9PLfKf8pb+P/fiXrv+6F2KSr8Rc2lqz/qXINHtfRKQEMOfWwrl+EM4/Z2Cu/hvkHsROfwbv8JvwznsVm70v2CFKCaCWvohICWIi3JiUntgO3WHrZrzLF2EXv4Nd/A40TcBpfyVc2BTjqE0nv6eiLyJSAhnHgYbNcTVsjs3ei13xAXb1Uryb/g1Vq2Pad8G07Iip4A52qFKMqOiLiJRwJqYqpvdfsN2vxa7/yLfj31vTsfNe9a35b3+lJv4JoKIvIlJqmNBQTGIyJCZjv9uBzViM/fcK7OqlEHeBb+JffGtMaFiwQ5Ug0aCPiEgpZGrXxfnLYJynZmKu/jscysXOGI/33pvwvvMydt+eYIcoQaCWvohIKeab+NcD27E7bP0U7/L3sUvmYZfMhYsvxWnfFRo118S/MkJFX0SkDDDGwEVNcV3UFOvZh131IXblEryTHoUq1TBJnTGtUzDuyGCHKmdRwIr+7bffTnh4OI7j4HK5GDt2LLm5uYwfP559+/ZRpUoVhg4ditvtxlrLzJkz2bhxI+XKlSM1NZW4uDgAMjIymDt3LgC9e/cmOTk5UI8gIlIqmOgqmJ7XY7v2x25c65v4N+cl7PzXfUf8tu+KqVM/2GHKWRDQlv7DDz9MZOR
/f4ucP38+jRs3plevXsyfP5/58+czYMAANm7cyJ49e5g0aRLbt2/nxRdfZMyYMeTm5jJnzhzGjh0LwIgRI4iPj8ft1pIUEZHTZUJCMQltIaEt9odvfRP/Pl6O/Tgdzqvnm/Uf3xZTrlywQxU/CeogTmZmJklJSQAkJSWRmZkJwLp162jXrh3GGBo0aMChQ4fIyclh06ZNNGnSBLfbjdvtpkmTJmzatCmYjyAiUiqYGuf5dvx7aibmukFwPA/70iS89/4V7+zp2J92BztE8YOAtvQff/xxADp16kRKSgr79+8nKioKgKioKA4cOACAx+MhNja28H0xMTF4PB48Hg8xMTGF16Ojo/F4PL/7nLS0NNLS0gAYO3bsCffyh5CQEL/fsyxSHv1HufQf5RKo9Rds3xs4/sUmDn8wj2PpC7FL3yWsaQLlu/SmXHxrjOt/lw/l0n/8lcuAFf1Ro0YRHR3N/v37GT16NNWrVz/la621v7tmjDnpa092PSUlhZSUlMKvs7KyziDiU4uNjfX7Pcsi5dF/lEv/US5/45xacOMQnF43YFcvJW/lB+SNHQlRsZh2l2PaXI6pHH3KtyuX/nO6uTxVjQ1Y9350tO8bo1KlSiQkJPD1119TqVIlcnJyAMjJySkc74+JiTnh4bKzs4mKiiI6Oprs7OzC6x6Pp7CnQEREzg5TKQqna3+cMdNwbr8Pzq2FfXcW3hF/o2DqWOyXm0/aWJPiJyBF/+jRoxw5cqTwz59++im1a9cmPj6eFStWALBixQoSEhIAiI+PZ+XKlVhr2bZtGxEREURFRdGsWTM2b95Mbm4uubm5bN68mWbNmgXiEUREyjzjcmGaJeIa+ijO6KmYjt1h62d4n3kQ74OpeJe+iz10MNhhyh8ISPf+/v37efrppwEoKCigTZs2NGvWjLp16zJ+/HjS09OJjY1l2LBhADRv3pwNGzYwZMgQwsLCSE1NBcDtdtOnTx9GjhwJQN++fTVzX0QkCMw51TH9bsL2GoBd9xF2xWLs7F/2+49vg0nugv3NHCwpHowtA30yu3f7d9apxqn8Q3n0H+XSf5TLM2e/3+kr/mtXwLEjhNSpT0GrjpgWyZjyEcEOr0QrcWP6IiJSupladXAGpOI8PRNz/W0A2Nen4r1nIN5XJmN3btfYf5BpG14REfErEx6BSe5CdJ8BZK37GLtyie+0v1UfQu04TLvOmBbtMOFq/Qeair6IiJwVxhhMnQaYOg2w/W7yFf6VH2Bfew779gzMZe0wbS+H8+ufclm2+JeKvoiInHUmogKm/ZXY5C6wc5uv+P/a+q9xHqbt5ZjEZEyFisEOtVRT0RcRkYAxxkDcBZi4C7D9/47NXIVdvRT75jTsnJcwl7TCtO0EDS7Wcb9ngYq+iIgEhYmogEnqDEmdfTP/Vy/Frl2O/WSF77jf1imYVh0xUVr65y8q+iIiEnSmVh3Mtbdg+9yI3fCx7xeA+a9h350FjZrjtEmBJpdhQkODHWqJpqIvIiLFhgkrh0lMhsRk7N7d2I/SsWuW4Z36JLgr+tb8t07B1KoT7FBLJBV9EREplkzV6pirBmB7XgtfbMZ+lObb/GfZAqhdF9MmxbcCQJP/ikxFX0REijXjuODiSzAXX4LNPYD9ZKXvF4BZL2BnT8c0bYFp3REaNse4XMEOt1hT0RcRkRLDuCMxHbpBh27Y73Zg16T7lv6t/wgqRfmW/bXsiKlRO9ihFksq+iIiUiKZ2nUxteti+w6Ez9bjXbMMm/Yedsk834Y/rTpiLmur7v/fUNEXEZESzYSEQvNEXM0TsQd+xn6ywjcBcNZU7OwX1f3/Gyr6IiJSapjIypiUnpDSE/vdf7Afp2PXZvy3+79Fsq8HoIx2/6voi4hIqWRqx2Fqx2H73Pjf7v9l72E/nAfn1cO07ohJaItxRwY71IBR0RcRkVLt1N3/vtn/NL0Mp2VH3wqBUt79r6IvIiJlxu+6/9csw/57Bd71ayCy8i+z/ztgap4
f7FDPChV9EREpkwq7//sOhM/X4/0oHbtsAfbD+b7Nf1p19G3+U7H0dP+r6IuISJlmQkKhWSKuZonYg/t9m/+sWYZ981/Yt2dAk3icVh3g4nhMSMkumyU7ehERET8yFSthOnaHjt2xu3b6xv7/nYF341pwR2JaJGFadoDacb5jgksYFX0REZGTMDXrYK7+m2/2/5aNeD9e9t+9/2uc5xv7b5GEqRwd7FCLTEVfRETkD5iQEGiagKtpAvbQQWzmKt/2v3NmYt95GRo1x7Rsj2nWAhNWLtjh/iEVfRERkSIyFSpikq+E5Cuxe3b9svd/Bnba09jyEZhLWvm6/+s3xDhOsMP9HRV9ERGRM2Cq1cT0/gu21wDY9jn24+XYdR9hP0qDmKq+5X+J7THVagQ71EIq+iIiIn+CcRy4sAnmwibY627Fbvq3b/vfRXOw78+GOg183f/FYPc/FX0RERE/MeXCMS2SoEUS9uds7L9X+n4BmPUC9q3p0PhSnJbtoXECJjQ04PGp6IuIiJwFpnIM5oqr4IqrsN/vxK5d7tv9b9O/IcKNSWiDSWwPdS8M2PI/FX0REZGzzNSqg6lVB9v7Rvhys2/8/+N07IoPoEo1nEEjMLXjznocKvoiIiIBYlwu38E+F1+CPXoYu/5jbOZKiD0nIJ+voi8iIhIEJjwC07ojtO4YsM8sfosIRURE5KwIaEvf6/UyYsQIoqOjGTFiBHv37mXChAnk5uZSp04d7rjjDkJCQjh+/DiTJ0/mP//5DxUrVuSuu+6iatWqAMybN4/09HQcx+Gvf/0rzZo1C+QjiIiIlFgBbekvWrSIGjX+u0nBa6+9RteuXZk0aRIVKlQgPT0dgPT0dCpUqMCzzz5L165def311wHYtWsXa9as4ZlnnuH+++9n+vTpeL3eQD6CiIhIiRWwop+dnc2GDRvo2NE3dmGtZcuWLSQmJgKQnJxMZmYmAOvWrSM5ORmAxMREPv/8c6y1ZGZm0qpVK0JDQ6latSrVqlXj66+/DtQjiIiIlGgBK/ovvfQSAwYMKFyLePDgQSIiInC5XABER0fj8XgA8Hg8xMTEAOByuYiIiODgwYMnXP//7xEREZE/FpAx/fXr11OpUiXi4uLYsmXL/3y9tfZ314wxJ71+MmlpaaSlpQEwduxYYmNjTy/g/yEkJMTv9yyLlEf/US79R7n0H+XSf/yVy4AU/a+++op169axceNG8vLyOHLkCC+99BKHDx+moKAAl8uFx+MhOtp3JnFMTAzZ2dnExMRQUFDA4cOHcbvdhdd/9dv3/FZKSgopKSmFX2dlZfn1eWJjY/1+z7JIefQf5dJ/lEv/US7953RzWb169ZNeD0j3/nXXXcfUqVOZMmUKd911FxdffDFDhgyhUaNGrF27FoCMjAzi4+MBuPTSS8nIyABg7dq1NGrUCGMM8fHxrFmzhuPHj7N3715+/PFH6tWrF4hHEBERKfGMLWqfuZ9s2bKFBQsWMGLECH766affLdkLDQ0lLy+PyZMns3PnTtxuN3fddRfnnOPbrWju3LksX74cx3EYOHAgzZs3D2T4IiIiJZeV0zZ8+PBgh1AqKI/+o1z6j3LpP8ql//grl9qRT0REpIxQ0RcRESkjXI888sgjwQ6iJIqLO/tHIJYFyqP/KJf+o1z6j3LpP/7IZcAn8omIiEhwqHtfRESkjFDRFxERKSMCerRuSbdp0yZmzpyJ1+ulY8eO9OrVK9ghlRjPPfccGzZsoFKlSowbNw6A3Nxcxo8fz759+6hSpQpDhw7F7XYHOdLiLysriylTpvDzzz9jjCElJYUrr7xS+TwDeXl5PPzww+Tn51NQUEBiYiL9+/c/5bHf8seKeny6/LHbb7+d8PBwHMfB5XIxduxY//18+2XhXxlQUFBgBw8ebPfs2WOPHz9u//GPf9jvv/8+2GGVGFu2bLE7duyww4YNK7z26quv2nnz5llrrZ03b5599dVXgxVeieLxeOyOHTustdYePnz
YDhkyxH7//ffK5xnwer32yJEj1lprjx8/bkeOHGm/+uorO27cOLt69WprrbUvvPCCXbJkSTDDLDEWLFhgJ0yYYJ944glrrVUez1Bqaqrdv3//Cdf89fOt7v0i+vrrr6lWrRrnnHMOISEhtGrVqvAoYPnfGjZs+LvfSjMzM0lKSgIgKSlJ+SyiqKiowlm85cuXp0aNGng8HuXzDBhjCA8PB6CgoICCggKMMac89ltO7XSOT5fT56+fb/WzFNH/P9Y3JiaG7du3BzGikm///v1ERUUBvkJ24MCBIEdU8uzdu5edO3dSr1495fMMeb1ehg8fzp49e7jiiis455xzTnnst5zar8enHzlyBPjj49Plf3v88ccB6NSpEykpKX77+VbRLyJ7iuN+RYLl6NGjjBs3joEDBxIRERHscEosx3F46qmnOHToEE8//TQ//PBDsEMqcU73+HT5Y6NGjSI6Opr9+/czevToU56YdyZU9Ivo/x/rm52dXfhbl5yZSpUqkZOTQ1RUFDk5OURGRgY7pBIjPz+fcePG0bZtW1q0aAEon39WhQoVaNiwIdu3bz/lsd9ycqd7fLr8sV/zVKlSJRISEvj666/99vOtMf0iqlu3Lj/++CN79+4lPz+fNWvWFB4FLGcmPj6eFStWALBixQoSEhKCHFHJYK1l6tSp1KhRg27duhVeVz5P34EDBzh06BDgm8n/2WefUaNGjVMe+y0nd7rHp8upHT16tHCI5OjRo3z66afUrl3bbz/f2pHvNGzYsIGXX34Zr9dL+/bt6d27d7BDKjEmTJjAF198wcGDB6lUqRL9+/cnISGB8ePHk5WVRWxsLMOGDdMSsyLYunUrDz30ELVr1y4cYrr22mupX7++8nmavv32W6ZMmYLX68VaS8uWLenbt+8pj/2W/60ox6fLqf300088/fTTgG9yaZs2bejduzcHDx70y8+3ir6IiEgZoe59ERGRMkJFX0REpIxQ0RcRESkjVPRFRETKCBV9ERGRMkKb84gIU6ZMISYmhmuuuSbgn22t5fnnnyczM5Nq1arxxBNPBDwGkbJCRV+kGLr99tvJy8vj2WefLTwQZtmyZaxatYpHHnkkuMH52datW/n00095/vnnC5/1t/Lz85k1axZr1qzh0KFDREZGkpCQwMCBAwFfrm699VaaNGkS4MhFSh4VfZFiqqCggEWLFpW4TaC8Xi+OU/SRw1/PBz9ZwQeYN28eO3bsYMyYMURFRbFv3z6+/PJLf4UrUqao6IsUUz169ODdd9/liiuuoEKFCif83d69exk8eDBvvPFG4SlmjzzyCG3btqVjx45kZGSwbNky6tatS0ZGBm63mzvuuIMff/yRt956i+PHjzNgwACSk5ML73ngwAFGjRrF9u3bqVOnDoMHD6ZKlSoA/PDDD8yYMYP//Oc/REZGcvXVV9OqVSvANzQQFhZGVlYWX3zxBffcc8/vWt0ej4dp06axdetW3G43PXv2JCUlhfT0dKZPn05+fj433HAD3bt3p3///ie8d8eOHVx22WWF+5FXrVqVqlWrAvDss8+SlZXFk08+ieM49O3bl549e7Jt2zZeeeUVdu3aRZUqVRg4cCCNGjUqzFODBg347LPP2L17N40aNSI1NRW3201eXh5Tp05l06ZNeL1ezj33XIYPH07lypX99H9VJLg0kU+kmIqLi6NRo0YsWLDgjN6/fft2zjvvPGbMmEGbNm2YMGECX3/9NZMmTeKOO+5gxowZHD16tPD1q1evpk+fPkyfPp3zzz+fSZMmAb79v0ePHk2bNm148cUXufPOO5k+fTrff//9Ce+96qqrePnll7nwwgt/F8vEiROJiYnhhRde4O677+aNN97gs88+o0OHDtx88800aNCAV1999XcFH6B+/fosXLiQJUuW8N13351w4uUdd9xBbGwsw4cP59VXX6Vnz554PB7Gjh1L7969mTFjBjfccAPjxo074SjSFStWcNttt/HCCy/gOA4zZsw
ovH748GGef/55ZsyYwc0330xYWNgZ5V+kOFLRFynG+vfvz+LFi8/o7OyqVavSvn17HMehVatWZGdn07dvX0JDQ2natCkhISHs2bOn8PWXXHIJDRs2JDQ0lGuvvZZt27aRlZXFhg0bqFKlCu3bt8flchEXF0eLFi0KD1IBSEhI4MILL8RxnN8VyaysLLZu3cr1119PWFgY559/Ph07dmTlypVFeo6rrrqKnj17snr1akaMGMGgQYPIyMg45etXrlxJ8+bNueSSS3AchyZNmlC3bl02bNhQ+Jp27dpRu3ZtwsPDueaaa/j444/xer24XC5yc3PZs2cPjuMQFxenY4ulVFH3vkgxVrt2bS699FLmz59PjRo1Tuu9lSpVKvzzr4X4t93UYWFhJ7T0Y2JiCv8cHh6O2+0mJyeHffv2sX379sKJc+Cbb9CuXbuTvvf/y8nJwe12U758+cJrsbGx7Nixo0jP4TgOnTt3pnPnzuTl5ZGens7zzz9PvXr1qFmz5u9en5WVxdq1a1m/fv0J8f7avf//442NjaWgoIADBw7Qrl07srOzmTBhAocPH6Zt27Zcc801hITon0opHfSdLFLM9e/fn+HDh59wjO6vk96OHTtW2BL9+eef/9TnZGdnF/756NGj5ObmEhUVRUxMDA0bNuTBBx885Xt/Pe3vZKKiosjNzeXIkSOFhT8rK+uMzlYPCwujc+fOvP322+zateukRT8mJoa2bdsyaNCgU97nt8+alZWFy+UiMjISx3Ho168f/fr1Y+/evTzxxBNUr16dDh06nHasIsWRuvdFirlq1arRsmVLFi9eXHgtMjKS6OhoVq1ahdfrJT09nZ9++ulPfc7GjRvZunUr+fn5vPnmm9SvX5/Y2FguvfRSfvzxR1auXEl+fj75+fl8/fXX7Nq1q0j3jY2N5YILLmDWrFnk5eXx7bffsnz5ctq2bVuk97///vts2bKFvLw8CgoKyMjI4MiRI9SpUwfw9V7s3bu38PVt27Zl/fr1hZPx8vLy2LJlywmFftWqVezatYtjx44xe/ZsEhMTcRyHzz//nO+++w6v10tERAQhISGntRJBpLhTS1+kBOjbty+rVq064dqtt97Kiy++yBtvvEGHDh1o0KDBn/qM1q1b8/bbb7Nt2zbi4uIYMmQIAOXLl+eBBx7g5Zdf5uWXX8Zay3nnnceNN95Y5HvfeeedTJs2jVtvvRW3202/fv2KvK4+LCyMV155hT179mCM4dxzz+Xuu+/mnHPOAaBXr17MmDGD1157jd69e9OjRw/uvfdeXnvtNSZOnIjjONSrV4+bb7658J7t2rVjypQp7N69m4suuojU1FTA11sybdo0PB4P4eHhtGzZssi/nIiUBMb+diqsiEgp99uljSJljfqtREREyggVfRERkTJC3fsiIiJlhFr6IiIiZYSKvoiISBmhoi8iIlJGqOiLiIiUESr6IiIiZcT/AScFv8uHXj73AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plot_stats_in_graph(metric_dict, y_axis_label='Loss', x_axis_label='Number of Steps')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**TA**: Does that make sense now?\n", + "\n", + "**Student**: Yeah, somewhat. What about more complicated systems? Will I have to implement everything using barebone components like F.linear etc.?\n", + "\n", + "**TA**: You can use existing nn.Modules as components of new nn.Modules therefore, you are able of modularizing your network blocks, and then combining them at the end in one big network with very few lines of code. Pytorch already provides almost every kind of layer out there in their torch.nn package. Look at the [documentation](https://pytorch.org/docs/stable/nn.html) for more information. Now, let's see how we can combine modules to build a larger module. Let's build a multi layer fully connected module.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 163, + "metadata": {}, + "outputs": [], + "source": [ + "class MultiLayerFCCNetwork(nn.Module):\n", + " def __init__(self, input_shape, num_hidden_units, num_output_units, num_hidden_layers):\n", + " super(MultiLayerFCCNetwork, self).__init__()\n", + " self.input_shape = input_shape\n", + " self.num_hidden_units = num_hidden_units\n", + " self.num_output_units = num_output_units\n", + " self.num_hidden_layers = num_hidden_layers\n", + " \n", + " x_dummy = torch.zeros(input_shape)\n", + " \n", + " self.layer_dict = nn.ModuleDict() # Allows us to initialize modules within a dictionary structure.\n", + " out = x_dummy\n", + " for i in range(self.num_hidden_layers):\n", + " self.layer_dict['layer_{}'.format(i)] = LinearLayerWithActivation(input_shape=out.shape, \n", + " num_units=self.num_hidden_units, bias=True,\n", + " activation_type=nn.PReLU())\n", + " \n", + " out = self.layer_dict['layer_{}'.format(i)].forward(out)\n", + " \n", + " 
self.layer_dict['output_layer'] = LinearLayerWithActivation(input_shape=out.shape, \n", + " num_units=self.num_output_units, \n", + " bias=True, activation_type=nn.Identity())\n", + " out = self.layer_dict['output_layer'].forward(out)\n", + " \n", + " def forward(self, x):\n", + " out = x\n", + " for i in range(self.num_hidden_layers):\n", + " out = self.layer_dict['layer_{}'.format(i)].forward(out)\n", + "\n", + " out = self.layer_dict['output_layer'].forward(out)\n", + " return out\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 173, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Parameters with name layer_dict.layer_0.weights and shape torch.Size([64, 128])\n", + "Parameters with name layer_dict.layer_0.bias and shape torch.Size([64])\n", + "Parameters with name layer_dict.layer_0.activation_type.weight and shape torch.Size([1])\n", + "Parameters with name layer_dict.layer_1.weights and shape torch.Size([64, 64])\n", + "Parameters with name layer_dict.layer_1.bias and shape torch.Size([64])\n", + "Parameters with name layer_dict.layer_1.activation_type.weight and shape torch.Size([1])\n", + "Parameters with name layer_dict.layer_2.weights and shape torch.Size([64, 64])\n", + "Parameters with name layer_dict.layer_2.bias and shape torch.Size([64])\n", + "Parameters with name layer_dict.layer_2.activation_type.weight and shape torch.Size([1])\n", + "Parameters with name layer_dict.layer_3.weights and shape torch.Size([64, 64])\n", + "Parameters with name layer_dict.layer_3.bias and shape torch.Size([64])\n", + "Parameters with name layer_dict.layer_3.activation_type.weight and shape torch.Size([1])\n", + "Parameters with name layer_dict.output_layer.weights and shape torch.Size([512, 64])\n", + "Parameters with name layer_dict.output_layer.bias and shape torch.Size([512])\n" + ] + } + ], + "source": [ + "fcc_net = MultiLayerFCCNetwork(input_shape=x.shape, num_hidden_units=64, 
num_output_units=512, \n", + " num_hidden_layers=4)\n", + "optimizer = optim.Adam(fcc_net.parameters(), amsgrad=False, weight_decay=0.0)\n", + "\n", + "\n", + "for name, params in fcc_net.named_parameters():\n", + " print('Parameters with name', name, 'and shape', params.shape)\n", + "\n", + "metric_dict = {'losses': []} \n", + " \n", + "for i in range(100):\n", + "\n", + " out = fcc_net.forward(x)\n", + " loss = F.cross_entropy(out, y)\n", + " fcc_net.zero_grad() #removes grads of previous step\n", + " optimizer.zero_grad() #removes grads of previous step\n", + " loss.backward() #compute gradients of current step\n", + " optimizer.step() #update step\n", + "\n", + " metric_dict['losses'].append(loss.detach().cpu().numpy()) #.detach: Copies the value of the loss \n", + "# and removes it from the graph, \n", + "# .cpu() sends to cpu, and \n", + "# numpy(), converts it to numpy format." + ] + }, + { + "cell_type": "code", + "execution_count": 174, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAfQAAAEUCAYAAADdksQIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO3deXwV1f3/8deZm40kJCQ3hJCwbyrgxlIQBBEiouJGgWqVivpVkSpiLRWtrbZWRS0VUBaVzV2x/kRRcUGKqIgFAhZBCCiIETAkAQKEEJI5vz9GoghogJtMcvN+Ph55kNw7d+4nx2veM2fOmWOstRYRERGp0Ry/CxAREZHjp0AXEREJAwp0ERGRMKBAFxERCQMKdBERkTCgQBcREQkDEX4XcLwmTZpEVlYWiYmJjB079me3nTlzJqtWrQKgpKSEnTt3MnPmzCqoUkREpHLV+EDv1asX/fr1Y+LEib+47dChQ8u/nzt3Lhs2bKjEykRERKpOjQ/0tm3bkpube9BjW7duZdq0aRQWFhIdHc0NN9xARkbGQdt8/PHHDB48uCpLFRERqTQ1PtAP54knnuC6666jYcOGrFu3jqlTp3L33XeXP79t2zZyc3Np3769j1WKiIiETtgFenFxMWvXruVf//pX+WOlpaUHbfPxxx/TtWtXHEdjAkVEJDyEXaC7rktcXBwPP/zwEbdZtGgR1157bRVWJSIiUrnC7hQ1NjaW1NRUPvnkEwCstWzcuLH8+c2bN7Nnzx7atGnjU4UiIiKhZ2r6amvjxo1j9erV7Nq1i8TERAYPHkz79u158skn2bFjB6WlpXTv3p2BAwcCMGvWLPbv388VV1zhc+UiIiKhU+MDXURERMKwy11ERKQ2UqCLiIiEgRo/yn3z5s0h21dKSgp5eXkh219tpXYMDbVjaKgdQ0PtGBqhaMf09PTDPq4zdBERkTCgQBcREQkDCnQREZEwUOOvoYuISHix1lJcXIzruhhj/C4npL777jv27dv3i9tZa3Ech5iYmAq3gQJdRESqleLiYiIjI4mICL+IioiIIBAIVGjb0tJSiouLqVOnToW2V5e7iIhUK67rhmWYH62IiAhc163w9gp0ERGpVsKtm/14HE1bKNC/Z7dtZdeMCdii3X6XIiIictQU6Afs2knR6y9iV/zX70pERMRnrVu39ruEo6ZAP6B5G5z6DbBLP/K7EhERkaOmQP+eMYaYbr1h9Qp1u4uICOBNH7v33nvp3bs3ffr04bXXXgO86WcDBgzgnHPOoXfv3nz66aeUlZUxcuTI8m2feOIJADZu3MgVV1xBv379uOiii1i/fj0Ac+bMoXfv3mRmZjJgwIDjrlXDCH8kplsfil57AbviU0y3Pn6XIyJS67kvPon9ZkNI92kaN8e57LoKbfvWW2+xatUq3nvvPQoKCjj//PPp2rUrr776KmeddRa33HILZWVl7N27l1WrVrF161bmz58PwM6dOwH405/+xJgxY2jRogWfffYZd9xxBy+//DLjxo3jueeeo2HDhuXbHg8F+o9EtD4JgqnYpR+DAl1EpNb773//yyWXXEIgEKB+/fp07dqVzz77jNNOO43bbruN0tJSzj33XNq3b0+TJk3YtGkTd911F3369OGss85iz549LFu2jBtuuAHweoMP3FimU6dO3HrrrVx44YWcd955x12rAv1HjDGYjt2x78/B7tmNiYv3uyQRkVqtomfSlcVae9jHu3btyiuvvML777/PLbfcwrBhwxg0aBDvvfceCxYsYObMmcyZM4e//e1vJCQk8N577wHe3PLS0lIAHnzwQbKysnj//ffp27cv7777LsnJycdcq66h/4TpdCaUlWJXfOp3KSIi4rOuXbvy+uuvU1ZWRn5+Pp9++imnnXYaOTk5pKSkcMUVV3DZZZexcuVKCgoKcF2XCy64gFGjRrFy5Urq1q1L48aNmTNnDuAdIKxatQrwrq136NCBUaN
GkZycfNzLgesM/aeatfq+2/0j6K5udxGR2uy8885j2bJlnHPOORhj+POf/0xqaiqzZs1iypQpREREEBcXx/jx49myZQt/+MMfyu/udscddwDw2GOPcccddzB+/HjKysq46KKLaNeuHf/4xz/YsGED1lrOPPNM2rVrd1y1Gnuk/oQa4niPaH7swMLz7r9nYOe9jjP2aUxc3ZDtv7Y40I5yfNSOoaF2DI2qbMeioiJiY2Or5L2q2o+73CvicG2Rnp5+2G3V5X4YXrd7GXb5Yr9LERERqRAF+uE0bQUpDbDLPva7EhERkQqpkmvoJSUl3H333ZSWllJWVkbXrl0ZPHjwQdvs37+fxx57jK+++oq6desycuRIUlNTq6K8QxhjMJ3OxL43G7u7EBOf4EsdIiK1UQ2/EhxSR9MWVXKGHhkZyd13383DDz/MQw89xIoVK8jOzj5om/nz5xMXF8ejjz7KBRdcwHPPPVcVpR1Rebf7pwt9rUNEpLZxHOeorjOHq9LSUhyn4jFdJWfoxhhiYmIAKCsro6ys7JAl4ZYuXcqgQYMAb5rA9OnTsdb6toyeadoSWp6Infca9uzzME7FFqQXEZHjExMTQ3FxMfv27Qu7pVSjo6PLbyzzc6y1OI5Tnp0VUWXT1lzX5fbbb2fr1q2ce+65h6xkU1BQQDAYBCAQCBAbG8uuXbtISDi4u3vevHnMmzcPgDFjxpCSkhKyGiMiIg7aX/Gvf8fOh+6k7vpV3n3epUJ+2o5ybNSOoaF2DA21Y2gc7Sj3o9p3pez1MBzH4eGHH2bPnj3885//ZNOmTTRp0qT8+cNdJzjckVlmZiaZmZnlP4dyGsVPp2XYlidB/TR2/vtpdrU+OeyOFCuLpgmFhtoxNNSOoaF2DI1QtGO1mbYWFxdH27ZtWbFixUGPB4NB8vPzAa9bvqioiPh4f2+9apwA5pxLYEM2rFvtay0iIiI/p0oCvbCwkD179gDeiPeVK1eSkZFx0DYdO3ZkwYIFACxevJh27dpVizNi060PxNfFffdVv0sRERE5oirpct++fTsTJ07EdV2stZxxxhl07NiRl156iZYtW9KpUyd69+7NY489xs0330x8fDwjR46sitJ+kYmOxvS6APvGi9itOZi0Rn6XJCIicgjd+vVHjnRtwxbuwL39Wky33jhDfh+y9wtXutYWGmrH0FA7hobaMTTC6hp6TWQS6mG69cYumo8t3OF3OSIiIodQoFeQOediKN2PXTDX71JEREQOoUCvIJPWCNp3wC58B6s7GImISDWjQD8KTq8LYGcBrNAqbCIiUr0o0I/GyR0gmIr7n7f8rkREROQgCvSjYJwAptd5kP05Nmej3+WIiIiUU6AfJdP9HIiIxC7QWbqIiFQfCvSjZOomYDr3wC5egC3a43c5IiIigAL9mJizL4B9xdhP/uN3KSIiIoAC/ZiY5q2hWWvsgrcOu0qciIhIVVOgHyNz9vmwNQfW/M/vUkRERBTox8p07gF1E3HfeEln6SIi4jsF+jEykVGYCy+H7M/hs0/9LkdERGo5BfpxMD3PhbRGuP9+SreDFRERXynQj4MJBHAGXg3ffYtd+Lbf5YiISC2mQD9ep3SCE0/BznkBW7Tb72pERKSWUqAfJ2MMzqBrYM9u7Fsv+12OiIjUUgr0EDBNWmDO6I19fw5221a/yxERkVpIgR4i5pIrwXGw/+9pv0sREZFaSIEeIiYpiOk3ELv0I+zaz/0uR0REahkFegiZcy/11kt/8QlsWZnf5YiISC2iQA8hExXtDZDL2Yhd+I7f5YiISC2iQA+1Dmd409hmP4vdXeh3NSIiUktEVMWb5OXlMXHiRHbs2IExhszMTM4///yDtlm1ahUPPfQQqampAHTp0oWBAwdWRXkhZYzBuew63L/fgn3tOcwVN/pdkoiI1AJVEuiBQIAhQ4bQokUL9u7dy+jRoznllFNo1KjRQduddNJJjB49uipKqlQmoynm7Auw89/E9uyHadz
c75JERCTMVUmXe1JSEi1atACgTp06ZGRkUFBQUBVv7Rtz4eUQF4f71KPY/SV+lyMiImGuSs7Qfyw3N5cNGzbQqlWrQ57Lzs5m1KhRJCUlMWTIEBo3bnzINvPmzWPevHkAjBkzhpSUlJDVFhEREbr9paRQfNOd7BxzB1EvTyfh5j9jjAnNvqu5kLZjLaZ2DA21Y2ioHUOjMtvR2CpczLu4uJi7776bAQMG0KVLl4OeKyoqwnEcYmJiyMrKYubMmUyYMOEX97l58+aQ1ZeSkkJeXl7I9gfgvv4Cds4LmN9ci5N5cUj3XV1VRjvWRmrH0FA7hobaMTRC0Y7p6emHfbzKRrmXlpYyduxYevTocUiYA8TGxhITEwNAhw4dKCsro7Cw5o8SN/1/A6d3xc6agV29wu9yREQkTFVJoFtrmTJlChkZGfTv3/+w2+zYsYMDnQXr16/HdV3q1q1bFeVVKuM4ONeMhPTGuI8/hM3d4ndJIiIShqrkGvratWtZuHAhTZo0YdSoUQBcfvnl5d0Offv2ZfHixbz77rsEAgGioqIYOXJk2FxzNjGxOL//M+59t+FOuh/nzn9ioqL9LktERMJIlV5DrwzV/Rr6j9nPl+GO/xum13k4YTw/XdfaQkPtGBpqx9BQO4ZGWFxDFzDtO2L6XopdMBebtcjvckREJIwo0KuYufRKaNrKm5+ev83vckREJEwo0KuYiYjEuf6PUObiTv2nVmUTEZGQUKD7wKSmY668EdZ/gX3jRb/LERGRMKBA94nTtRemWx/sm7Owyxf7XY6IiNRwCnQfmSuGedfTp/0L+80Gv8sREZEaTIHuIxMVjfP7P0OdONzH7sXu3O53SSIiUkMp0H1m6iXj3HQX7C7EnXS/VmYTEZFjokCvBkzTljjX/AG+Wot9+jFq+L1+RETEBwr0asJ07Ia5+LfYxQuwixf4XY6IiNQwCvRqxJw/CFq1xb7wBLZAN50REZGKU6BXI8YJ4Fx9C5SVeneSU9e7iIhUkAK9mjGpDTGDrobVK7AfzPW7HBERqSEU6NWQOes8aHsa9uUZWj9dREQqRIFeDRljcK4aAYEI3BnjsK7u9y4iIj9PgV5NmeQUzOXXe/d7f+81v8sREZFqToFejZmuveC0rtjZz2K//drvckREpBpToFdjxhicIcO9W8NOfwRbut/vkkREpJpSoFdzJqEezpXDYdNX2Ddn+V2OiIhUUwr0GsB0OANzxtnYt17GbljndzkiIlINKdBrCHPZdZCYjDv9X9h9+/wuR0REqhkFeg1hYuNxho6Ard9iX5nhdzkiIlLNKNBrENP2NEzmxdj/vIX9bInf5YiISDWiQK9hzIDfQaPmuDPHY3du97scERGpJiKq4k3y8vKYOHEiO3bswBhDZmYm559//kHbWGuZMWMGy5cvJzo6muHDh9OiRYuqKK9GMZGRONfdhvuPP+DOGIcz4m6Mo+MyEZHarkqSIBAIMGTIEB555BHuu+8+3nnnHXJycg7aZvny5WzdupUJEyZw/fXXM3Xq1KoorUYy6U0wg6+BVcux8+f4XY6IiFQDVRLoSUlJ5WfbderUISMjg4KCgoO2Wbp0KT179sQYQ5s2bdizZw/bt6tL+UjMWefBqb/CvvIU9usv/S5HRER8ViVd7j+Wm5vLhg0baNWq1UGPFxQUkJKSUv5zMBikoKCApKSkg7abN28e8+bNA2DMmDEHveZ4RUREhHR/lc299W7y/3gNTL6fpIemEkiu73dJQM1rx+pK7RgaasfQUDuGRmW2Y5UGenFxMWPHjmXo0KHExsYe9Jy19pDtjTGHPJaZmUlmZmb5z3l5eSGrLyUlJaT7qxLD78R98Hby/n4bzqgHMNHRfldUM9uxGlI7hobaMTTUjqERinZMT08/7ONVNpqqtLSUsWPH0qNHD7p06XLI88Fg8KBfMj8//5CzczmUadwc57o/wqYvcWc8gnVdv0sSEREfVEmgW2uZMmUKGRkZ9O/f/7DbdOr
UiYULF2KtJTs7m9jYWAV6BZlTf4UZOBSWLcK+/rzf5YiIiA+qpMt97dq1LFy4kCZNmjBq1CgALr/88vIz8r59+3L66aeTlZXFiBEjiIqKYvjw4VVRWtgw51wCW3Kwb87CTWuE07WX3yWJiEgVqpJAP/HEE5k16+dXCjPG8H//939VUU5YMsbAFcOwuVuwTz2KTW2IaXGC32WJiEgV0R1JwoiJiMQZNhrqJeNOuh9boAEsIiK1hQI9zJi6CTg3/QWKi3En3qeV2UREagkFehgyGU28ke/ffIWdMe6wUwJFRCS8KNDDlDm1M+bXV2GXfYx94yW/yxERkUpW5XeKk6pj+l4KOV9jX38em94E07Gb3yWJiEglqfAZ+htvvMHGjRsByM7O5sYbb+Smm24iOzu7smqT42SMwfzu99DiBNzpj2A3feV3SSIiUkkqHOhvvvkmqampALzwwgv079+fAQMGMHPmzMqqTULAREbhDL8T4uriTvwHtlAL3oiIhKMKB3pRURGxsbHs3buXjRs3ct5559G7d282b95cmfVJCJjEJJyb/gy7C3EnPYDdv9/vkkREJMQqHOjBYJC1a9fy8ccfc9JJJ+E4DkVFRTiOxtXVBKZJS5yrR8KXa3CnjMHuLfK7JBERCaEKp/GVV17Jv/71L1599VUGDhwIQFZW1iHLoEr1ZTqdifntMPh8Ge79f8RuyfG7JBERCRFjj2OScmlpKeCt7+qXUHb515blAe3albiPPwT7S3CuvRVzWteQ7r+2tGNlUzuGhtoxNNSOoVEtlk/Nyclhx44dgLeu+axZs5g9ezZlZWXHVZhUPXPCyTh3/QsaZOBOvB/39Rd08xkRkRquwoE+fvx4ioq8665PP/00X3zxBdnZ2TzxxBOVVpxUHpNcH+f2MZgzemPnvODdUa5Ug+VERGqqCveVb9u2jfT0dKy1LFmyhLFjxxIVFcVNN91UmfVJJTKRUXD1LZCahn3teez2fJwb78DExvldmoiIHKUKn6FHRkayd+9e1q9fTzAYJCEhgcjISPZrClSNZozB6X8Z5ppbYd1q3IdGYwu2+V2WiIgcpQqfoXfv3p2///3v7N27l379+gGwYcOG8pvNSM3mnHE2tl4y7uQxuA+MwrnlHkyjZn6XJSIiFVThQB86dCifffYZgUCA9u3bA97Z3VVXXVVpxUnVMiedinP7GNxx9+A+dAfO7/+MOaG932WJiEgFHNVdYU499VTS0tLIzs4mLy+Pli1bloe7hAeT0RRn9ENQLxl33N3YZYv8LklERCqgwmfo27dvZ9y4caxbt474+Hh27dpFmzZtuOWWW0hOTq7MGqWKmaA3At599F7cxx/EXH4Dztnn+12WiIj8jAqfoT/55JM0bdqU6dOn88QTTzBjxgyaNWvGk08+WZn1iU9MXF2cW++FUzpjn5+C++5sv0sSEZGfUeFAX7t2Lb/73e+IiYkBICYmhiuvvFLLp4YxEx2NM2w0pmN37MvTcd9+xe+SRETkCCrc5R4XF0dOTg7NmjUrf2zz5s3ExsZWRl1STZiICLjuj+A42Feewi0rw7lgsN9liYjIT1Q40C+66CLuvfdeevfuTf369dm2bRsLFizgN7/5TWXWJ9WACQTg2j94oT77WVzXxbnwMr/LEhGRH6lwoGdmZpKWlsZHH33Epk2bSEpK4qabbmLNmjW/+NpJkyaRlZVFYmIiY8eOPeT5VatW8dBDD5XPae/SpUv5im5SPZhAAK4Z6YX668/jRkXhnDvA77JEROR7R7VMWvv27Q+aprZ//37uv//+XzxL79WrF/369WPixIlH3Oakk05i9OjRR1OOVDHjBGDoCNi/H/vvmbh1YnF69vO7LBER4SgD/Vi1bduW3NzcqngrqWTGCcC1t2KL92KfnYwbXQeny1l+lyUiUusd1Y1lKlN2djajRo3i/vvv55tvvvG7HPkZJiISZ9hoaN3WW6XtsyV+lyQiUuv94hn6559/fsTnSktLQ1J
E8+bNmTRpEjExMWRlZfHwww8zYcKEw247b9485s2bB8CYMWNISUkJSQ0AERERId1fuHPvHsf2u2+mdMoY4gZfTdwlV2AiI9WOIaJ2DA21Y2ioHUOjMtvxFwN98uTJP/t8KAr78dS3Dh06MG3aNAoLC0lISDhk28zMTDIzM8t/zsvLO+73PyAlJSWk+6sN7O/vwjw7mT3PP8GeBW/jDPk99bv2UDuGgD6PoaF2DA21Y2iEoh3T09MP+/gvBvrPDWQLlR07dpCYmIgxhvXr1+O6LnXr1q3095XjZ+ITMMNux362BPf5ybgP3k7huZdiL7wcEx3jd3kiIrVGlQyKGzduHKtXr2bXrl0MGzaMwYMHl3fX9+3bl8WLF/Puu+8SCASIiopi5MiRGGOqojQJEXNqZ5wT2mNfe569786Gz7Nwht+BST38kaSIiISWsdZav4s4Hps3bw7ZvtSlFBp1c75kxz//CtbF+b/bMCd38rukGkmfx9BQO4aG2jE0KrPLvdqMcpfwEX1aF5w/j4Vgqrdi2xsvYl3X77JERMKaAl0qhamfhnP7Q5guZ2Ffex47czy2rMzvskREwlaVXEOX2slER8M1t0KDDOxrz2H3FuFcPwoTGeV3aSIiYUdn6FKpjDE4/X+Dufx6WPEp7vi/YYuL/C5LRCTsKNClSji9+2Ou/QOsW4U79i/YXYV+lyQiElYU6FJlnK69cIbfCTkbcR/4I3ZLjt8liYiEDQW6VClz6q9w/ngfFO/FfWAUdvUKv0sSEQkLCnSpcqblid60tuQU3PH34C6Y63dJIiI1ngJdfGGCqTi3PwjtOmCfm4z79GMaLCcichwU6OIbUycW56Y/Y84biP3oPdy/3YLNXuV3WSIiNZICXXxlnADOgN/hjHoAjMH95524L8/A7i/xuzQRkRpFgS7VgmndFuev4zE9zsW++yru30fqbF1E5Cgo0KXaMDF1cIYMx7nlHthfgvvwHd619T27/S5NRKTaU6BLtWPad8D522OYvpdiP56H+9fhuJ9+QA1fGFBEpFIp0KVaMtExOIOu9qa3JaVgp47FffB27Fdr/S5NRKRaUqBLtWaatMS582HM0BGQ9x3uA6Nwp47FFmhdZhGRH9Nqa1LtGSeA6Z6J7dgNO/cV7Luzsf9binPVTZiO3f0uT0SkWtAZutQYJiYW59IhOH+fCGkZuFMexH1usqa4iYigQJcayNRPw/nTA96guQVzce8fhd2qhV5EpHZToEuNZCIivUFzI/4KO/Jw/3Ebdvliv8sSEfGNAl1qNHNyJ5y/jIf0xriT7sd97Xms6/pdlohIlVOgS41nklNwRt2P6dYH+8aLuJPux+7VQi8iUrso0CUsmMgozNARmMuvh5VLce8difvfhVi3zO/SRESqhAJdwoYxBqd3f5w//AMiIrFP/hP3nhG4Sz5UN7yIhL0qmYc+adIksrKySExMZOzYsYc8b61lxowZLF++nOjoaIYPH06LFi2qojQJQ+aE9jj3PIpdtgg75wXsEw9jM2bhXH0Lpmkrv8sTEakUVXKG3qtXL+68884jPr98+XK2bt3KhAkTuP7665k6dWpVlCVhzDgOTuczce6ZgLnuj1C0B/eBP+G++6rO1kUkLFVJoLdt25b4+PgjPr906VJ69uyJMYY2bdqwZ88etm/fXhWlSZgzTgDnVz1x7h4Pp3TCvjwDd8LfsIX6fIlIeKkWt34tKCggJSWl/OdgMEhBQQFJSUmHbDtv3jzmzZsHwJgxYw563fGKiIgI6f5qq2rZjikp2L+MZe87s9k1Yzz8fSRxV48gpmdfjDF+V3dY1bIdayC1Y2ioHUOjMtuxWgT64ZbFPNIf2czMTDIzM8t/zssL3SIdKSkpId1fbVWt27FTD5yGTXFnjKNw3N8ofOvfOL8dhslo6ndlh6jW7ViDqB1DQ+0YGqFox/T09MM+Xi1GuQeDwYN+wfz8/MOenYuEgslo4q3gNmQ4fLsJ9++34L40Fbur0O/
SRESOWbUI9E6dOrFw4UKstWRnZxMbG6tAl0plnABOz344/5iMObMv9v05uKOvwX1+CnbbVr/LExE5alXS5T5u3DhWr17Nrl27GDZsGIMHD6a0tBSAvn37cvrpp5OVlcWIESOIiopi+PDhVVGWCCY+ATNkODbzQuw7r2IXvotd8DamU3dMnwuhxQnV9hq7iMiPGXu4C9g1yObNm0O2L10jCo2a3I52ez72/dexH7wNxXuhSQvMWedhupyFiY6p0lpqcjtWJ2rH0FA7hkbYX0MXqS5MUhBn4NU4D8/AXHEjlJVhn5mIO+pq3FefxRbt8btEEZHDqhaj3EWqGxMTi+l1HvasfrD+C9z3X8e+NQu7cC7m/MGYXudjIiP9LlNEpJwCXeRnGGOgdVsCrdtiv16P+/+exs6ahn1/DqbvJZjOPTF1E/wuU0REgS5SUaZpKwK3/h27egXu7GexLzyBnTUdTu6Ec0YvOLmzztpFxDcKdJGjZNqeRqDtadhvNmAX/wf76Qe4KxZD3UTMmZmYnv0wKQ38LlNEahkFusgxMo2bYxo3xw64ClavwF34NvbtV7Fv/z84uROm69mYVidhkoJ+lyoitYACXeQ4mUAATu5I4OSO2Pxt2A/fwX74LvZ/S7AAySmY5ifAiadgftUTExvnd8kiEoYU6CIhZIL1MZdcie1/GXzzFfbLNfDVWuxXa2HZx9iXp3tz2s/qp7XZRSSkFOgilcBEREDzNpjmbcofsxvXYT94G/vpAuyH70KTlpjOZ2I6dsfUT/OxWhEJBwp0kSpimrXGNGuNHXQ19pMF2E/mY195CvvKU94d6TqdienWB5OodQxE5Ogp0EWqmImNx/TpD336Y/O+w2YtwmZ9gv1/T2Nfex7T4QzM2Rdggz38LlVEahAFuoiPTEoDTN9Loe+l2K3fYj+Yi/34feySDylo0gK3fUdM29Oh5QmYCM1xF5EjU6CLVBMmLQPzm//DXnIl9tMPMEsWYt9+BfvWyxAdA23aY07rgjm9K6Zuot/likg1o0AXqWZMdAym57kkD7iCbZu+hrUrsatXYFdlYZ9Zin12MrRph+nYzQv3eprnLiIKdJFqzcTGweldMad3xVoLORuxy5ge1lsAABctSURBVD72rrk//zj2hSeg5UleuHc4A5Nc3++SRcQnCnSRGsIYA9/fnY5LrsRu3uQNqFu2CPvSVOxLU72pch3O8L5SD79msoiEJwW6SA1l0ptg0ptA/8u8AXUHRssfmAqX0dS7M90ZvXX7WZFaQIEuEgZMWgbm/EFw/iBsfi52+Sfemfurz2BnPwftTsc5MxNOPBVi47yzfREJKwp0kTBjgqmYzIsh82Js7mbsx/Oxn8zHnfKgt4HjQFxdiE/ANG2FuWAwJi3D36JF5Lgp0EXCmElNx1x6Jfbiy2HN/7DfboLdu2B3IXbXDq+b/r8fYLpnYvr/RoPqRGowBbpILWCcALQ93btJzY/Ywu3Yt/7t3dDmk/mYzj2829A2bALpTaBesrrnRWoIBbpILWYSkjCXXYc95xLsGy9iVyyGT/7jLfsKUDfRu8d8l7OgxQkKd5FqTIEuIt6yr1fdDFfdjC3cAVu+wW7+xrupzYfvYv/zJtRPw3TugWneGhq39NZ5V8CLVBtVFugrVqxgxowZuK5Lnz59uOSSSw56fsGCBTzzzDMkJycD0K9fP/r06VNV5YnI90xCPUiohznhZDj7fGzRHm/U/KcfYOf+27vBDXgD6xo1w6Q2hPppkJKGSU2D9KaYSN13XqSqVUmgu67LtGnTuOuuuwgGg9xxxx106tSJRo0aHbRdt27duPbaa6uiJBGpIBMbh+meCd0zscV74duvsd98Bd9swOZsxK74FHbtBPC66iMioVlrTKuTMK3ael31dRN8/R1EaoMqCfT169eTlpZGgwYNAC+4lyxZckigi0j1ZmLqQMsTMS1PPOhxW7wX8r6D777FfrUWu2419r3XsG+/4m2Qmu69puWJmFM
660Y3IpWgSgK9oKCAYPCH/4GDwSDr1q07ZLtPP/2UL774goYNG3LVVVeRkpJSFeWJyHEyMXWgUTOvC75jdwBsyT7YuA775VrsV2uwny+DT+ZjjQPtO+B0z4RTO2tZWJEQqZJAL7/m9iM/HUzTsWNHunfvTmRkJO+++y4TJ07k7rvvPuR18+bNY968eQCMGTMmpKEfERGhg4gQUDuGRli0Y3oGdOsFeH8HyjZvonjB2+z9z1u4U8ZgEuoR1flMotqfTlS70wnUTwt5CWHRjtWA2jE0KrMdqyTQg8Eg+fn55T/n5+eTlJR00DZ169Yt/z4zM5PnnnvusPvKzMwkMzOz/Oe8vLyQ1ZmSkhLS/dVWasfQCMt2jI6Dc38N51yCs2oF9uN5FH+ygOL33/CeD6ZC01aY+g28QXb10yCt0XGNqA/LdvSB2jE0QtGO6emHX3ipSgK9ZcuWbNmyhdzcXJKTk1m0aBEjRow4aJvt27eXh/zSpUt1fV0kjBknACd3xJzcEeu63kC7tSux2Z973//vv1BaetB8eJq1xjRrhWnU3BtVH0z1lpcVEaCKAj0QCHDNNddw33334bouZ599No0bN+all16iZcuWdOrUiblz57J06VICgQDx8fEMHz68KkoTEZ8Zx/lhWdjMiwC8kN9RAHlbvdvVblyH3bgO+3kW1ro/vDg23psf3yADGqRDWgYmrRGkN9a1eal1jD3cBe4aZPPmzSHbl7qUQkPtGBpqx0PZ4r2wNQfyc7F530Hed9jcrfDdt1CwDQ78OQtEeMvHNmlBfLtT2ZPeDBo21o1wjoM+j6FR47vcRURCwcTUgWatve73nzxnS/ZB7hbs5k2w6SvsN19hVyxm10fveRscuFlOm3aYekHv7D4u3rtBTmKSwl5qPAW6iIQFExXtTZtr1Ax+1RPwRtYnle6jYPFC7za2a1fCkg85pFsyMQnTpj20ae+FflqGAl5qHAW6iIQtYwwRDRvh9OgLPfp6U2gLtnl3tivajd2zB3btgC/XYrN/FPZ1E70z+dbtMW3aQXoTTCDg968j8rMU6CJSaxhjvKlxwVTv5wNP9O7vhf22Ldi1n0P2Kuy6Vdhli7yADwQg2AAapGMapHuBHxEBgUiIjPD2VFYGZaXev/EJmHanY+ol+/OLSq2kQBcR4fuwT03HpKZDj74A2Pxc7LpVsHkT9rstkLvZ67Yv2feL+7Pgzak/uRPmpFMgJQ3qJXlT9kQqgQJdROQITDAV8/3Z/AHWWigthdL93r9l+8G1EBHwRtcHAt7o+/8txa5cin1zFvaNF70XBwJQLwjB+t5Nc+o3/P4gIg1SGkBsvK7dyzFToIuIHAVjDERGel9H0qi5dwOc8wdhdxXCxmxsQZ53/b5gGzbvO+znWbBzO8APg/TqxHpd+ykNMIn1vK79uolQt563TG2DdG+kv8hhKNBFRCqRqZsAJ3c6ZJodgN1XDNu2etPtDsyrz/vO69r/8gvYXVg+t7489JNSvFH4KQ28s/2kICYpBRISv5+KVxdi6uhMvxZSoIuI+MREx/ywSt1hnrduGezZ7Z3J527GbsmBrd9it+ZgP/svFO7wtvvpCx0HoutAVBRERUNkFMTXxaSkwYH75NdLhugYiIrxtouLx8TGV/JvLJVJgS4iUk0ZJ/BDt/thQt+W7vfCviAPdhdii3bDnl3eQcC+Ym/wXsk+7P4SKNyJXb3cu6UuhzkIAIiN8wbv1W/ghX9qGqZ+Q0htiE1K8g4wSku9L+vqmn81o0AXEamhTETk4afh/Qxbsg/yc72z++9D3+7bB3sKvS7/bVsh52vsiv9C2Q8L5OQebmcRkZCc4i2Uk5TiHRBEx3i9AzExmMQkr7bkVK+HQOFfqRToIiK1iImKhoaNva8Djx1mO+uWwfZ87/r+ti3E7iumaF+xN/c+4vvo2FkABXne9L7VK6C4yDtI+Ol1f/C6/usle70NCfUwCfW8MQDJ9b2ZBMH6EF/X238g4C3aI0d
FgS4iIocwTqD87N+cdCrxKSkUV2BREWstlJR44b6jwFtIpyAX8rfBzu3Ywh3w3WbsutXeoD+O0P3vOBAZDXFx3mC/2HioE+uNO4iO8Q4QYup4BweJSZCYBHXrfT9mINIbNxAZWavm/SvQRUQkZIwxEB3tfSUmQdOWR7wUYEv2edf/C7Zh83OhaI93t73S7++4V1IMe3Z7YwOKdnsHB/uKvQOGkmIoLgbrHv6A4ICoqO8vAXz/5QS8gwVjvH/j6v5wQJCQhEkKfj97IBniE2tUT4ECXUREfGGioiEtw5uGdwyvt24Z7N7lDQzcuR27e6cX9vv3w/593vf7iqF4LxQXeQcDrusN6HMtuGWQvw371dryKYIHHRwEIiCxHiQk/dATkFDPW8wn4fuDgPgEiPl+3EB0tK89Agp0ERGpkYwT8AI2oR40bn5MBwUH2NJSb9GeHfmwPR/7/b/eZYLtsD0P+/V6KNz5870CEZFer0DkD13/zuiHMLFxx1FdxSjQRUSk1jMREZDk3aiH5keeMWDdMthV+EOvwJ5C2LcP9u39/t9i2F9S/mVLSn7+roIhpEAXERGpIOMEvK72xCTvZ5/r+bGac7VfREREjkiBLiIiEgYU6CIiImFAgS4iIhIGFOgiIiJhQIEuIiISBhToIiIiYUCBLiIiEgaMtfZn72svIiIi1Z/O0H9k9OjRfpcQFtSOoaF2DA21Y2ioHUOjMttRgS4iIhIGFOgiIiJhIHDPPffc43cR1UmLFi38LiEsqB1DQ+0YGmrH0FA7hkZltaMGxYmIiIQBdbmLiIiEAQW6iIhIGIjwu4DqYsWKFcyYMQPXdenTpw+XXHKJ3yXVCHl5eUycOJEdO3ZgjCEzM5Pzzz+f3bt388gjj7Bt2zbq16/PrbfeSnx8vN/lVnuu6zJ69GiSk5MZPXo0ubm5jBs3jt27d9O8eXNuvvlmIiL0v+3P2bNnD1OmTOGbb77BGMONN95Ienq6Po9H6Y033mD+/PkYY2jcuDHDhw9nx44d+jz+gkmTJpGVlUViYiJjx44FOOLfQ2stM2bMYPny5URHRzN8+PDjur6uM3S8P6LTpk3jzjvv5JFHHuHjjz8mJyfH77JqhEAgwJAhQ3jkkUe47777eOedd8jJyWH27NmcfPLJTJgwgZNPPpnZs2f7XWqN8NZbb5GRkVH+87PPPssFF1zAhAkTiIuLY/78+T5WVzPMmDGD0047jXHjxvHwww+TkZGhz+NRKigoYO7cuYwZM4axY8fiui6LFi3S57ECevXqxZ133nnQY0f6/C1fvpytW7cyYcIErr/+eqZOnXpc761AB9avX09aWhoNGjQgIiKCbt26sWTJEr/LqhGSkpLKjyjr1KlDRkYGBQUFLFmyhLPOOguAs846S+1ZAfn5+WRlZdGnTx8ArLWsWrWKrl27At4fCrXjzysqKuKLL76gd+/eAERERBAXF6fP4zFwXZeSkhLKysooKSmhXr16+jxWQNu2bQ/p/TnS52/p0qX07NkTYwxt2rRhz549bN++/ZjfW30leEejwWCw/OdgMMi6det8rKhmys3NZcOGDbRq1YqdO3eSlJQEeKFfWFjoc3XV38yZM7nyyivZu3cvALt27SI2NpZAIABAcnIyBQUFfpZY7eXm5pKQkMCkSZP4+uuvadGiBUOHDtXn8SglJydz4YUXcuONNxIVFcWpp55KixYt9Hk8Rkf6/BUUFJCSklK+XTAYpKCgoHzbo6UzdLwzoZ8yxvhQSc1VXFzM2LFjGTp0KLGxsX6XU+MsW7aMxMREzfM9TmVlZWzYsIG+ffvy0EMPER0dre71Y7B7926WLFnCxIkTefzxxykuLmbFihV+lxV2Qp09OkPHOyrKz88v/zk/P/+Yj5Bqo9LSUsaOHUuPHj3o0qULAImJiWzfvp2kpCS2b99OQkKCz1VWb2vXrmXp0qUsX76ckpIS9u7dy8yZMykqKqKsrIxAIEBBQQHJycl+l1qtBYNBgsE
grVu3BqBr167Mnj1bn8ejtHLlSlJTU8vbqUuXLqxdu1afx2N0pM9fMBgkLy+vfLvjzR6doQMtW7Zky5Yt5ObmUlpayqJFi+jUqZPfZdUI1lqmTJlCRkYG/fv3L3+8U6dOfPDBBwB88MEHdO7c2a8Sa4Tf/va3TJkyhYkTJzJy5Ejat2/PiBEjaNeuHYsXLwZgwYIF+lz+gnr16hEMBtm8eTPgBVOjRo30eTxKKSkprFu3jn379mGtLW9HfR6PzZE+f506dWLhwoVYa8nOziY2Nva4Al13ivteVlYWTz31FK7rcvbZZzNgwAC/S6oR1qxZw1//+leaNGlS3lV0+eWX07p1ax555BHy8vJISUnhD3/4g6YJVdCqVauYM2cOo0eP5rvvvjtkmlBkZKTfJVZrGzduZMqUKZSWlpKamsrw4cOx1urzeJRmzZrFokWLCAQCNGvWjGHDhlFQUKDP4y8YN24cq1evZteuXSQmJjJ48GA6d+582M+ftZZp06bx2WefERUVxfDhw2nZsuUxv7cCXUREJAyoy11ERCQMKNBFRETCgAJdREQkDCjQRUREwoACXUREJAzoxjIiYW7ixIkEg0Euu+yyKn9vay2TJ09myZIlpKWl8cADD1R5DSK1hQJdpIr9/ve/p6SkhEcffZSYmBgA3n//fT788EPuuecef4sLsTVr1vC///2PyZMnl/+uP1ZaWsrzzz/PokWL2LNnDwkJCXTu3JmhQ4cCXlvdcMMNnHLKKVVcuUjNo0AX8UFZWRlvvfVWjbuBkeu6OE7Fr9QdWP/5cGEO8Oqrr/Lll19y//33k5SUxLZt2/jiiy9CVa5IraJAF/HBRRddxGuvvca5555LXFzcQc/l5uZy00038cILL5SvbHXPPffQo0cP+vTpw4IFC3j//fdp2bIlCxYsID4+nptvvpktW7bw0ksvsX//fq688kp69epVvs/CwkLuvfde1q1bR/PmzbnpppuoX78+AN9++y3Tp0/nq6++IiEhgd/85jd069YN8Lrro6KiyMvLY/Xq1YwaNeqQs+WCggKefPJJ1qxZQ3x8PBdffDGZmZnMnz+fadOmUVpaypAhQ7jwwgsZPHjwQa/98ssv+dWvflV+T/DU1FRSU1MBePTRR8nLy+PBBx/EcRwGDhzIxRdfTHZ2Nk8//TQ5OTnUr1+foUOH0q5du/J2atOmDStXrmTz5s20a9eO4cOHEx8fT0lJCVOmTGHFihW4rkvDhg25/fbbqVevXoj+q4r4S4PiRHzQokUL2rVrx5w5c47p9evWraNp06ZMnz6dM888k3HjxrF+/XomTJjAzTffzPTp0ykuLi7f/qOPPuLXv/4106ZNo1mzZkyYMAHwVsn7xz/+wZlnnsnUqVO55ZZbmDZtGt98881Br7300kt56qmnOPHEEw+pZfz48QSDQR5//HFuu+02XnjhBVauXEnv3r257rrraNOmDc8888whYQ7QunVr3njjDd555x02bdp00OpTN998MykpKdx+++0888wzXHzxxRQUFDBmzBgGDBjA9OnTGTJkCGPHjj1oOdQPPviAG2+8kccffxzHcZg+fXr540VFRUyePJnp06dz3XXXERUVdUztL1IdKdBFfDJ48GDmzp17TGtzp6amcvbZZ+M4Dt26dSM/P5+BAwcSGRnJqaeeSkREBFu3bi3fvkOHDrRt25bIyEguv/xysrOzycvLIysri/r163P22WcTCARo0aIFXbp0KV+AA6Bz586ceOKJOI5zSADm5eWxZs0arrjiCqKiomjWrBl9+vRh4cKFFfo9Lr30Ui6++GI++ugjRo8ezbBhw1iwYMERt1+4cCGnn346HTp0wHEcTjnlFFq2bElWVlb5Nj179qRJkybExMRw2WWX8cknn+C6LoFAgN27d7N161Ycxylf31skXKjLXcQnTZo0oWPHjsyePZuMjIyjem1iYmL59wdC9sddx1FRUQedoQeDwfLvY2JiiI+PZ/v27Wzbto1
169aVD0ID7/p+z549D/van9q+fTvx8fHUqVOn/LGUlBS+/PLLCv0ejuPQr18/+vXrR0lJCfPnz2fy5Mm0atWKRo0aHbJ9Xl4eixcvZtmyZQfVe6DL/af1pqSkUFZWRmFhIT179iQ/P59x48ZRVFREjx49uOyyy4iI0J9BCQ/6JIv4aPDgwdx+++0HLT17YADZvn37ys8gd+zYcVzvk5+fX/59cXExu3fvJikpiWAwSNu2bfnLX/5yxNceWEXvcJKSkti9ezd79+4tD/W8vLxjWic7KiqKfv368fLLL5OTk3PYQA8Gg/To0YNhw4YdcT8//l3z8vIIBAIkJCTgOA6DBg1i0KBB5Obm8sADD5Cenk7v3r2PulaR6khd7iI+SktL44wzzmDu3LnljyUkJJCcnMyHH36I67rMnz+f77777rjeZ/ny5axZs4bS0lJefPFFWrduTUpKCh07dmTLli0sXLiQ0tJSSktLWb9+PTk5ORXab0pKCieccALPP/88JSUlfP311/znP/+hR48eFXr9m2++yapVqygpKaGsrIwFCxawd+9emjdvDni9Drm5ueXb9+jRg2XLlpUPbCspKWHVqlUHhfiHH35ITk4O+/btY9asWXTt2hXHcfj888/ZtGkTrusSGxtLRETEUY3YF6nudIYu4rOBAwfy4YcfHvTYDTfcwNSpU3nhhRfo3bs3bdq0Oa736N69Oy+//DLZ2dm0aNGCESNGAFCnTh3uuusunnrqKZ566imstTRt2pSrrrqqwvu+5ZZbePLJJ7nhhhuIj49n0KBBFZ43HhUVxdNPP83WrVsxxtCwYUNuu+02GjRoAMAll1zC9OnTefbZZxkwYAAXXXQRf/rTn3j22WcZP348juPQqlUrrrvuuvJ99uzZk4kTJ7J582ZOOukkhg8fDni9HE8++SQFBQXExMRwxhlnVPjAQ6Qm0HroIhI2fjy9T6S2UX+TiIhIGFCgi4iIhAF1uYuIiIQBnaGLiIiEAQW6iIhIGFCgi4iIhAEFuoiISBhQoIuIiISB/w/SWineZXHPmgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plot_stats_in_graph(metric_dict, y_axis_label='Loss', x_axis_label='Number of Steps')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**TA**: There we go, the network is doing much better during training with a multi-layer neural network. :)\n", + "\n", + "**Student**: Hmm.. I am weirdly excited even though I have not digested this completely yet. Where do I go to learn more? \n", + "\n", + "**TA**: Firstly, I think you should go and have a look at the MLP Pytorch Framework, so you can learn how Pytorch can be used with more complicated architectures, as well as to learn some good coding practices for research and industry alike. When you are working on your coursework, make sure to have the [pytorch official documentation page](https://pytorch.org/docs/stable/nn.html) open in your browser, as it is extremely well written most of the times. Then, when you have some spare time, perhaps in preparation for next term, I would recommend going through some of the Pytorch tutorials at the [pytorch tutorials page](https://pytorch.org/tutorials/). Finally, the best way to learn, in my opinion, is by engaging with Pytorch through a project that interests you." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/Plot_Results.ipynb b/notebooks/Plot_Results.ipynb new file mode 100644 index 00000000..5cb3a3a4 --- /dev/null +++ b/notebooks/Plot_Results.ipynb @@ -0,0 +1,200 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "%matplotlib inline\n", + "plt.style.use('ggplot')\n", + "experiment_dir = 'path/to/mlpractical_directory'" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def collect_experiment_dicts(target_dir, test_flag=False):\n", + " experiment_dicts = dict()\n", + " for subdir, dir, files in os.walk(target_dir):\n", + " for file in files:\n", + " filepath = None\n", + " if not test_flag:\n", + " if file == 'summary.csv':\n", + " filepath = os.path.join(subdir, file)\n", + " \n", + " elif test_flag:\n", + " if file == 'test_summary.csv':\n", + " filepath = os.path.join(subdir, file)\n", + " \n", + " if filepath is not None:\n", + " \n", + " with open(filepath, 'r') as read_file:\n", + " lines = read_file.readlines()\n", + " \n", + " current_experiment_dict = {key: [] for key in lines[0].replace('\\n', '').split(',')}\n", + " idx_to_key = {idx: key for idx, key in enumerate(lines[0].replace('\\n', '').split(','))}\n", + " \n", + " for line in lines[1:]:\n", + " for idx, value in enumerate(line.replace('\\n', '').split(',')):\n", + " 
current_experiment_dict[idx_to_key[idx]].append(float(value))\n", + " \n", + " experiment_dicts[subdir.split('/')[-2]] = current_experiment_dict\n", + " \n", + " return experiment_dicts\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VGG_08 ['train_acc', 'train_loss', 'val_acc', 'val_loss']\n", + "VGG_38 ['train_acc', 'train_loss', 'val_acc', 'val_loss']\n" + ] + } + ], + "source": [ + "result_dict = collect_experiment_dicts(target_dir=experiment_dir)\n", + "for key, value in result_dict.items():\n", + " print(key, list(value.keys()))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "plt.style.use('ggplot')\n", + "\n", + "def plot_result_graphs(plot_name, stats, keys_to_plot, notebook=True):\n", + " \n", + " fig_1 = plt.figure(figsize=(8, 4))\n", + " ax_1 = fig_1.add_subplot(111)\n", + " for name in keys_to_plot:\n", + " for k in ['train_loss', 'val_loss']:\n", + " item = stats[name][k]\n", + " ax_1.plot(np.arange(0, len(item)), \n", + " item, label='{}_{}'.format(name, k))\n", + " \n", + " ax_1.legend(loc=0)\n", + " ax_1.set_ylabel('Loss')\n", + " ax_1.set_xlabel('Epoch number')\n", + "\n", + " # Plot the change in the validation and training set accuracy over training.\n", + " fig_2 = plt.figure(figsize=(8, 4))\n", + " ax_2 = fig_2.add_subplot(111)\n", + " for name in keys_to_plot:\n", + " for k in ['train_acc', 'val_acc']:\n", + " item = stats[name][k]\n", + " ax_2.plot(np.arange(0, len(item)), \n", + " item, label='{}_{}'.format(name, k))\n", + " \n", + " ax_2.legend(loc=0)\n", + " ax_2.set_ylabel('Accuracy')\n", + " ax_2.set_xlabel('Epoch number')\n", + " \n", + " fig_1.savefig('../data/{}_loss_performance.pdf'.format(plot_name), dpi=None, facecolor='w', edgecolor='w',\n", + " orientation='portrait', 
papertype=None, format='pdf',\n", + " transparent=False, bbox_inches=None, pad_inches=0.1,\n", + " frameon=None, metadata=None)\n", + " \n", + " fig_2.savefig('../data/{}_accuracy_performance.pdf'.format(plot_name), dpi=None, facecolor='w', edgecolor='w',\n", + " orientation='portrait', papertype=None, format='pdf',\n", + " transparent=False, bbox_inches=None, pad_inches=0.1,\n", + " frameon=None, metadata=None)\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":32: MatplotlibDeprecationWarning: \n", + "The frameon kwarg was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use facecolor instead.\n", + " fig_1.savefig('../data/{}_loss_performance.pdf'.format(plot_name), dpi=None, facecolor='w', edgecolor='w',\n", + ":37: MatplotlibDeprecationWarning: \n", + "The frameon kwarg was deprecated in Matplotlib 3.1 and will be removed in 3.3. 
Use facecolor instead.\n", + " fig_2.savefig('../data/{}_accuracy_performance.pdf'.format(plot_name), dpi=None, facecolor='w', edgecolor='w',\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAEMCAYAAAAyF0T+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzde1yUdd74/9c1Z84MDIiCqCgggqKm5Sm1QqO00n62teXut3urvdsO2pZ9tcPW9ru9ze2c1W7tlrVb7b3m2sluTWHX0jI1BRXxACgKqKCcTzMDM3N9/0AmUVBwYDj4fj4ePIS5Tp95O/C+Pp/rc1BUVVURQgghRK+m6e4CCCGEEMJzktCFEEKIPkASuhBCCNEHSEIXQggh+gBJ6EIIIUQfIAldCCGE6AN03rrQgw8+iMlkQqPRoNVqWb58eYvtqqry/vvvk5mZidFo5IEHHiAmJsZbxRNCCCF6Na8ldIBnn32WwMDAVrdlZmZSXFzMihUryM3N5d1332XZsmXeLJ4QQgjRa/WYJvedO3cydepUFEUhLi6Ouro6KioqurtYQgghRK/g1Rr6f//3fwMwY8YMUlJSWmwrLy/HYrG4fw4NDaW8vByz2XzBc544caLTymexWCgtLe20812uJI6ekxh6TmLoOYmh57oihgMGDGj1da8l9P/6r/8iJCSEqqoqli5dyoABAxgxYoR7e2sz0CqKct5r6enppKenA7B8+fIWNwGe0ul0nXq+y5XE0XMSQ89JDD0nMfScN2PotYQeEhICQFBQEOPHjycvL69FQg8NDW1xF1NWVtZq7TwlJaVF7b4z73zkbrRzSBw9JzH0nMTQcxJDz3mzhu6VZ+g2mw2r1er+fu/evURHR7fYZ9y4cWzevBlVVcnJycHX1/eize1CCCGEaOKVGnpVVRUvvfQSAE6nkylTpjB69Gg2btwIwMyZMxkzZgwZGRksWLAAg8HAAw884I2iCSGEEH2C0tuXT5VOcT2PxNFzEkPPSQw9JzH0XJ9rchdCCCFE15KELoQQQvQBktDPUE+dpOavb6HWVnd3UYQQQogOk4TerKaK+s8/Rj2wp7tLIoQQQnSYJPQzHFHDKO8/Bse+vd1dFCGEEKLDJKGfUVGhsi3xt1QdK2t11johhBCiJ5OEfoZ/gBaAWvyh6Gj3FkYIIYToIEnoZ/j4Kmi1UOfbH3VfRncXRwghhOgQSehnKIpCYLCB2tAY1GxJ6EIIIXoXSehnCQo2UOcfCXn7UW313V0cIYQQot0koZ8lyKzHih8uF3Awq7uLI4QQQrSbJPSzBAUbUFGoD4qWZnchhBC9iiT0swQG6wGojb0SNWuXDF8TQgjRa0hCP0uQ2QBAfWQSlJ2Cks5byU0IIYToSpLQz2IwaDCaFGr9IwGk2V0IIUSvIQn9HP6BWuoajdAvUsajCyGE6DUkoZ9Ra3eyKbcUxQi1NS6UpLFwKAu1wd7dRRNCCCEuStfdBegpDpVa+f+/KSJJ8WWCNpAl+ilEDPXF8Po/0Gi1KHodilaHXqtg0IBOo2DQKhi1CgatBqNOg0GvRaPVgKIBjRY0Gvd+Bq2CQaPg0Gixa3RY0WNTtCiKpuk8Og0GnQYUBbtLwa6C3aWgKAo+egUfnQZfnQYfvQY/gwYfgxatVtt0LQVAAUVp+lejAY0GVdHgUhRUQFVBRUVVwaUqOFFxoUFFRavVotNo0GpAqyhNpzmLQtPEO0IIIXouRe3lXblPnOicjmsNThfV+JKxuxh7HhwIqiOnsgKHy4VLBVVVcaoKDhQaFS0upfsbN0
xOOwZnI05Fg1OjxalompK0onRZ+RTV1ZTgVZWmWwXO/Ht+wm/erlVdaFRX07+oaM4cq6CitH4oGlUFVDSACrgU5cwNCKit3Fw0l6n5uqrivsKZPZq+U1A5v7jKmT3Of+Xs8zZf9+x37T6/8tP3rf1CnX1tzVln0LRanuYjfjqTeu71ztu79V/j1vZXWmzvOMX9b9tHX6ycP/2vtFWyto9tN0VpupNtQ2ufI2+7lBh6V8vP4aWe4XLz5/8z3v29xWKhtLS0U88/YMCAVl+XGvoZBq2G4RZ/fOID+HdeDXfEWYiOiWxzf6dLpdGl0uBwYXeq2B0urPZGcLrA5QSXE9Xlwul0YXe4aHQ4aXC40KpOTLjwwYEJBzhd2J0uGpwqdkfTn22j4sKIC4Oi4nKpWF1gdTZ91Tuh3qlQ74I6p0KjClqcaGlAqzQlSw0uFLXpe1QVjfJTQlHOJFQtTYlFQcWpgkMFFwqOc393VfWsGj7QXNPnp5p/i194VUWn0+FwOFDPHNuUjBWcZ77OPtbFub/wqvsPmYumP2qK2pT4NKhnbiTOKWJzCZTm75Uz7+2nP5hN11Tdr7T1h7Tl/mf+qCo/vcefrt2c7M98r3Le9hbfKk3xbXpfPyX+5msoasvjtBotTpfznLL9dFNwsT+zbdwntbG9rVuQtl4/P0+qCme9h+Y9W77gvuE5twXo3M9Qq+XtaFpQ0Wg0uFyuC+7VkbNeLKbn7a9e/P/gYs793egIFaXjN0HnXl+jQb1ADNvzORPeIwn9jNraWvLz8+kX3g+Npuk5+oVoNQpajYJJd3ZN2Ni1hewluuKO9HIjMfScxNBzEsPepfvbjXuIyspK1q5dS2lZKb7+GmprnBc/SAghhOghvFpDd7lcLFmyhJCQEJYsWdJiW3Z2Ni+88ALh4eEAXHXVVcybN89rZTObzQBUVFTgHxBKbbUkdCGEEL2HVxP6unXriIyMxGq1tro9ISHhvETvLb6+vphMJsrLy4kI1VByohGXS0WjuRy7dAghhOhtvNbkXlZWRkZGBtddd523LtkhiqJgsVjO1NC1qCrU1134OboQQgjRU3ithv7BBx8wf/78NmvnADk5OTz++OOYzWZ+8YtfMHDgwPP2SU9PJz09HYDly5djsVg6rYzh4eHk5uYSOTCE3Tvq0eCPxeLXaee/XOh0uk79f7kcSQw9JzH0nMTQc96MoVcS+q5duwgKCiImJobs7OxW9xkyZAh//OMfMZlMZGRk8OKLL7JixYrz9ktJSSElJcX9c2f2wAwJCaGmpoZ6W9M5Tx6vwDeg7RsQ0TrpGes5iaHnJIaekxh6zpvj0L3S5H7o0CF27tzJgw8+yGuvvca+ffvOS9bNz7ABxo4di9PppLq62hvFcwsLCwOgrq4Kg1G56NA1IYQQoqfwSg39zjvv5M477wSaerOvXbuWBQsWtNinsrKSoKAgFEUhLy8Pl8tFQECAN4rn1twsUl5ejn9AlAxdE0II0Wt068QyGzduBGDmzJls27aNjRs3otVqMRgMPPLII16fP9xsNqMoCpWVlfgHDKL4RKNXry+EEEJcKq8n9MTERBITE4GmRN4sNTWV1NRUbxenBZ1OR1BQEOXl5YQN1dCQr9LY4EJvkPl3hBBC9GySqc5hNpvdQ9cAaqvlOboQQoieTxL6OUJCQqisrMTXv+nnGpkxTgghRC8gCf0cZrMZl8uF01mLRgs1VVJDF0II0fNJQj9H85zulVWVBARqpYYuhBCiV5CEfo6zF2kJCNJQUyUJXQghRM8nCf0cJpMJHx8fysvLCQjSYrOqNDRIs7sQQoieTRJ6K5p7ugcENvV0l+foQgghejpJ6K0ICQk50+TenNCl2V0IIUTPJgm9FWazGZvNBooNnU4SuhBCiJ5PEnor3D3dKysJCNJKQhdCCNHjSUJvRcue7lqqq1yoqtrNpRJCCCHaJgm9FQEBAWi1WndCb2xQsdskoQshhOi5JKG3QqPREBwcTHl5OY
FBTSGSZnchhBA9mST0NriHrklPdyGEEL2AJPQ2hISEUF1djVbnwmBUqJFV14QQQvRgktDbYDabUVWVqqoq6ekuhBCix5OE3oaQkBAASktLCQjUUF3llJ7uQggheixJ6G0IDQ3FYDBw/PhxAoK0OB1grZeELoQQomeShN4GjUbDgAEDOH78OIHSMU4IIUQPJwn9AqKioqioqECjswKS0IUQQvRcktAvIDIyEoBTp09i8lGoloQuhBCih9J582Iul4slS5YQEhLCkiVLWmxTVZX333+fzMxMjEYjDzzwADExMd4s3nnCwsIwGAwUFRUREDRAllEVQgjRY3m1hr5u3Tp3rfdcmZmZFBcXs2LFCn7961/z7rvverNorTr7OXpAkJbaaicul3SME0II0fN4LaGXlZWRkZHBdddd1+r2nTt3MnXqVBRFIS4ujrq6OioqKrxVvDZFRkZSUVGBwWjF5YL6WqmlCyGE6Hm8ltA/+OAD5s+fj6IorW4vLy/HYrG4fw4NDaW8vNxbxWtTVFQUALXWEgCqK+U5uhBCiJ7HK8/Qd+3aRVBQEDExMWRnZ7e6T2uTtrSW/NPT00lPTwdg+fLlLW4CPKXT6c47n9lsxmg0YrVWYDBGUlWhZdTYzrtmX9RaHEXHSAw9JzH0nMTQc96MoVcS+qFDh9i5cyeZmZk0NDRgtVpZsWIFCxYscO8TGhpKaWmp++eysjL3uuRnS0lJISUlxf3z2cd4ymKxtHq+/v37c/jIYRJjkzl2pJb4kRo0mtZbGkTbcRTtJzH0nMTQcxJDz3VFDAcMGNDq615J6HfeeSd33nknANnZ2axdu7ZFMgcYN24cX3/9NZMnTyY3NxdfX99WE3p3iIyM5OjRowSHNnL8GJSXOrCE67u7WEIIIYSbV4etnWvjxo0AzJw5kzFjxpCRkcGCBQswGAw88MAD3Vm0Fpqfo9sdJWg0/SguapSELoQQokfxekJPTEwkMTERaErkzRRF4d577/V2cdolLCwMvV5PcfEJwiIiKT7eSOIYtc0OfkIIIYS3yUxx7aDRaIiMjKSoqIiISD3WelV6uwshhOhRJKG3U/N49IDgBlDgZFFjdxdJCCGEcJOE3k7Nz9FLTh0nxKKl+LgkdCGEED2HJPR2CgsLw8/Pj8OHD9M/Uk9NlYu6Wml2F0II0TNIQm8njUbDsGHDOHr0KCHhTZPgFEuzuxBCiB5CEnoHxMbG4nQ6KTlVQGCwNLsLIYToOSShd0D//v3x8/MjNzeXiEg95aVO7DZZrEUIIUT3k4TeAYqiEBsb29TsHnam2V1q6UIIIXqAbp0prjeKjY1l9+7dlJYfxT8wkqJjDQwaauzuYgkh+iBVVbHZbLhcrm6ZyKqkpAS73e716/YllxpDVVXRaDSYTKZ2/99LQu+giIgI/P39ycvLY/jQIRzMslFX68TPX9vdRRNC9DE2mw29Xo9O1z1/qnU6HVqt/G3zhCcxdDgc2Gw2fHx82rW/NLl3UHOz+7Fjx7BENDW7Hz8mze5CiM7ncrm6LZmL7qfT6XC52t9PSxL6JYiNjcXlclFcchRLuI7Cow2trucuhBCekPUiREc+A5LQL0G/fv0ICAggNzeXqMEG6mtdVJTJJDNCCCG6jyT0S9Dc7F5QUIA5zIlWC0VHG7q7WEII0anmzp3LN9980+K1v/zlLzzxxBMcOXKEX/7yl0yaNInU1FTmzZvHtm3b3Ptt2rSJWbNmMXXqVGbMmMH999/P8ePH27zWCy+8QEpKCjNmzODnP/85xcXFADQ2NrJw4UKuu+46pk2bxhtvvHHBMq9YseKS3uuiRYvIycnp8HEvv/wyb7/99iVds7NJQr9Ezc3ux44dISJKz4mCRpxOaXYXQvQdc+fO5Ysvvmjx2hdffMGcOXP45S9/yfz589m6dStff/01S5cu5dixYwAcPHiQp59+mtdee43NmzeTlpbGrbfeSmFhYZvX+s1vfkN6ejppaWmkpKTw6quvAvDVV1
/R0NDAv/71L77++ms++uijC56nrYSvquoFn0e/9NJLxMXFtbm9N5DeFpcoPDycoKAgDh06xNWT4jh+rJGSE40MGGjo7qIJIfog1z/+glqY36nnVAYOQXPHfW1unz17Ns8//zx2ux2j0UhhYSElJSUcOXKEK664gpkzZ7r3HT58OMOHDwfgrbfeYsGCBcTGxrq3n71vawICAtzf19fXu58dK4pCfX09DocDq9WKXq/H39+/1XMsW7YMm83GjBkziI+PZ/HixcyfP59Jkyaxa9cuVq5cyZtvvsmePXuw2WzMmjWLRYsWATBv3jx+97vfkZycTGxsLPfccw/p6emYTCbef/99wsLCLhJN2LdvH0uWLMFmszFo0CBefvllLBYL7733Hh9++CE6nY7Y2Fj+9Kc/8cMPP/DMM8+43+Onn37a5vtqL6mhXyJFURg+fDhFRUWYfG2YfBRpdhdC9CkhISGMHj3a3ez+xRdfcPPNN3Po0CFGjhzZ5nE5OTkkJSV1+HrLly9n3LhxfPbZZzz++OMAzJo1C19fX8aMGcOVV17J/fffj9lsbvX4J598EpPJRFpaGm+++SYAhw8fZt68eWzcuJGoqCgWL17M+vXrSU9PZ9u2bezfv/+889TX1zN27FjS09OZMGECH3/8cbvK/8gjj/DUU0+Rnp7O8OHDeeWVV4CmG5wNGzaQnp7O8uXLAXj77bdZtmwZaWlpfPbZZ5hMpg7H61xSQ/dAfHw827dvJyc3h8hBIzhyyI7d5sJokvskIUTnulBNuivNmTOHL774guuvv54vvviCV155hTVr1rTY55577iE/P5+YmBjefffdFtvKy8u5/fbbsVqtzJ8/n/vvv7/Nay1ZsoQlS5bwxhtv8P7777No0SJ2796NVqslIyODqqoq5s6dy9VXX82gQYPaVf6oqCiuuOIK989r167l448/blqXo6SE3NxcRowY0eIYg8HAjBkzABg5ciRbtmy56HWqq6upqqpi4sSJANx2223853/+JwAJCQk89NBDpKamkpqaCsD48eN57rnnmDt3LjfccAMDBgxo1/u5EMk8HggODiYiIoKDBw8SNciAqsLxAhmTLoToO1JTU/nuu+/IysrCZrMxcuRI4uPjycrKcu/z3nvv8eqrr1JZWQlAXFwc+/btA5pq+WlpacyfP5+6urp2XXPu3LmsW7cOgM8++4zp06ej1+uxWCyMHz+ePXv2tLv8vr6+7u8LCgp45513WLVqFenp6Vx33XXYbLbzjtHpdO4mf61Wi8PhaPf1WvO3v/2Nu+++m71795KamorD4eChhx7ixRdfxGazcdNNN5GXl+fRNUASusfi4+MpKyujwVFBYLBWmt2FEH2Kn58fEydO5NFHH2XOnDlAU619586dbNy40b2f1Wp1f//AAw+wYsUKcnNzW93emiNHjri/37hxI0OHDgUgMjKS77//HlVVqa+vJyMjg2HDhrV5Hr1eT2Nj6xWrmpoafHx8CAwM5PTp02zatOmCZeqIwMBAgoKC2L59OwBr1qxhwoQJuFwuTpw4weTJk3n66aeprq6mrq6Oo0ePkpCQwIMPPkhycnKnJHRpcvdQXFwcW7Zs4eDBgwwcPI7s3TZqqpwEBMl0iUKIvmHOnDnce++9/OlPfwLAx8eHv/71rzz33HM8++yzWCwW/P39WbBgAdDUxPzcc8+xcOFCamtrMZvNREZG8thjj7V5jeeff57Dhw+j0WiIjIx0P2u+++67+e1vf8u1116Lqqrcfvvt5zWRn+2uu+4iJSWFkSNHsnjx4hbbEhMTSUpK4pprriE6Oprx48d7GpoWXnvtNXenuOjoaF555RWcTicPP/wwNTU1qKrKfffdR1BQEC+++CJbt25Fo9EQFxfHNddc4/H1FdULU5w1NDTw7LPP4nA4cDqdTJgwgZ/97Gct9snOzuaFF14gPDwcgKuuuop58+Zd9NwnTpzotHJaLBZKS0s7fNzatWs5deoUd/78//Cvr2
oZOtxIwqj2zb3bF11qHMVPJIae6wsxrK+vb9Fk7G06nc7j5ubLnacxbO0z0Nbzdq/U0PV6Pc8++ywmkwmHw8EzzzzD6NGjzxvzl5CQwJIlS7xRpE4VHx9Pfn4+pWUnCYswU3S0geFJJhSNTNsohBDCO7yS0BVFcXfJdzqdOJ3OPjVHcUxMDAaDgYMHDzIifhoZP9RTetpBWD99dxdNCCF6lCeffJIff/yxxWv33nsvt99+e4fOM3v27POWJV2xYgUJCQkel7E1r7/+Ol999dV5ZVi4cGGXXO9SeO0ZusvlYvHixRQXF3P99de3mHCgWU5ODo8//jhms5lf/OIXDBw40FvF84hOp2PYsGHk5uZy9ZRp6PRNU8FKQhdCiJaWLVvWKec5N7l2tYULF/ao5N0arzxDP1tdXR0vvfQS//Ef/0F0dLT79fr6evdi7hkZGXzwwQetzsmbnp5Oeno60DQJQUND5/Uq9+RZx5EjR/jggw+47bbbqC7tx5GcGu741RD0+stvIIE8d/OcxNBzfSGGJSUlGI3G7i6G6EZ2u51+/fq1eM1gaH1GUq/3cvfz82PEiBHs3r27RUI/+6H/2LFjee+996iuriYwMLDF8SkpKaSkpLh/7sxOL550ogkICCAgIIDt27dz9eTZ5OxXyd5TTNTgy28q2L7QGam7SQw91xdiaLfb0Wq7b8RMX7gp6m6extBut5/3OW6rU5xXqo/N4+6gqcd7VlYWkZGRLfaprKx0rymel5eHy+VqMbdvT6coCgkJCRQUFKAz1OHrp6FQxqQLIYTwEq/U0CsqKnjrrbdwuVyoqsrEiRO54oor3JMSzJw5k23btrFx40a0Wi0Gg4FHHnmk13WcGzFiBDt27ODgwYNEDhpF7gE71noXPr6XX7O7EEII7/L6M/TO1hPGoZ/ts88+o6qqiv/v1vl8s76W4aNMxCZ4Pul+b9IXmjq7m8TQc30hht09Dv22227jwQcfZPr06e7X/vKXv3DkyBHuu+8+fv/735OXl0dgYCD+/v4sWrSICRMmAE3rob/00kvU1NRgNBoZOnQov/vd785rnW32wgsvsHHjRhRFwWKx8OqrrxIREUFjYyOLFi1i3759OBwO5s2bx8MPP9xp7/Gqq65i/fr1hISEtLo9Nja2xYx3HeXNcehSdexkiYmJVFdXU1F5gtBwHUfz7LhcvfqeSQhxmeqN66Ffztrd5L5v3z7Cw8MJDw+noqKCjz/+GI1Gw5133klwcHBXlrFXiYmJwWg0sn//fkaPvI4fv6ujuKiRAdGXX+c4IUTneXdnCfkV5y8k4okhZhP3juvX5vbeth76X//6VwoLC3n66acBWLVqFVlZWSxdupRf/epXnDhxArvdzj333MP8+fMvEp2WVFVl6dKlbNq0CUVRWLBgAbfccgslJSX85je/oaamBqfTyfPPP8+4ceN47LHH2Lt3L4qicPvtt/PrX/+6Q9e7FO2uob/33ntoNE27/+1vf3NPDvPOO+90WeF6I51OR3x8PIcPHybI7MDXX8ORHPvFDxRCiB6mt62HPnv2bNavX+/+ee3atdx8880AvPzyy3z99desW7eOlStXUl5e3qGyrVu3juzsbNLS0vjHP/7B0qVLKSkp4bPPPmPatGmkpaWRlpZGYmIi2dnZFBcX8+9//5tvv/22w5PmXKp219DLy8uxWCw4nU727NnDH//4R3Q6nXu9V/GTxMRE9u7dS05uDjGxw9mXaaWizIE5VNbCEUJcmgvVpLtSb1oPPTQ0lOjoaHbt2sWQIUM4fPiwewGWlStXupP9iRMnyM/Pb/O5eWt27NjBnDlz0Gq1hIWFMWHCBPbs2cPo0aN57LHHcDgcXH/99SQlJREdHU1BQQFPP/00M2fOZMqUKe2+jifaXUP38fGhsrKS/fv3ExUV5Z7KVcYoni8sLIywsDCys7MZOMSATo/U0oUQvVJvWw/95ptvZu3ataxbt47U1FQURWHr1q1s2bKFtWvXkp
6eTlJS0nnTxl5MW/3HJ0yYwJo1a4iIiGDhwoWsXr2a4OBg0tLSmDhxIitXrmTRokUdutalandCT01N5YknnmDFihVcf/31QFPHh7Z6LF7uEhMTKS0tpbziNNExRk4WNmKtd3V3sYQQokN623roN9xwAxs2bODzzz93N7fX1NQQFBSEj48PeXl5ZGRkdCACTSZMmMCXX36J0+mkrKyM7du3M3r0aIqKirBYLNx1113ccccdZGVlUV5ejsvlYtasWSxZsqTFzU9Xancb8Jw5c7jyyivRaDREREQATXdeF2o+uZzFxcXx3Xff8cMPP5By3WyO5NjJz7UzIvnyXVZVCNE79ab10IODg91DzcaMGQPA9OnT+fDDD0lJSSEmJoaxY8d2OAY33HADu3btYsaMGSiKwlNPPUV4eDiffPIJb7/9NjqdDj8/P15//XVOnjzJo48+isvlQlEUnnjiiQ5f71Jc8jj0ffv2odFoLhhYb+hp49DPlpWVxaZNm5gwYQIaRyKlxQ5Sbg5Ep+tdE+Z0VF8Y/9vdJIae6wsx7O5x6DL1q+d65Dj0Z599loMHDwLw+eef8/rrr/P666/z6aefXnJB+7qkpCTi4+PZtm0bPoGnaGxUKcyX6WCFEEJ0vnY3uRcWFhIXFwfAv/71L5599llMJhO/+93vuPXWW7usgL2Zoihce+21nD59mu+3pjFs4M3kHVCIjjGg1fbtWroQQrSmJ66H3twT/1yrVq3qUE/47tbuhN7cMl9cXAxAVFQUQLt7LV6u9Ho9s2bN4h//+AfF5d/ir03haJ6dofGX13SwQggBPXM99Oae+L1du5vc4+PjWblyJR9++KF7XF9xcXGvWhGtu5jNZlJSUigtK8Gm7iV3v53GRpkOVgghROdpd0J/8MEH8fX1ZdCgQfzsZz8Dmjqk3XjjjV1WuL4kNjaWxMRESkr3UVtXypFDMi5dCCFE52l3k3tAQAB33nlni9cupev/5WzKlCkcPXqUKus2Dh+8gcGxBoxGWR9HCCGE59qd0B0OB59++imbN2+moqICs9nM1KlTufXWW9HpZErT9jAajUybNo1169ZRrjtA3oGxJI6WcelCCCE81+7q4UcffURWVhb33XcfL774Ivfddx/79u3jo48+6sry9TlDhw4lJiaGyrrd5B4oldnjhBA91ty5c90LszT7y1/+whNPPMGRI0f45S9/yaRJk0hNTWXevHls27bNvd+mTZuYNWsWU6dOZcaMGdx///0cP368zWtVVFRwxx13MHnyZO644w73NLKNjY0sXLiQ6667jmnTpubkQQ0AACAASURBVPHGG29csMwrVqy4pPe6aNEicnJyOnzcyy+/zNtvv31J1+xs7U7o27Zt4//+3/9LcnIyAwYMIDk5mUWLFvHDDz90Zfn6HEVRmDZtGlqthtNV2zi078LTIQohRHfx5nrob731FlOmTOH7779nypQpvPXWW0DH10NvK+GrqorL1XYF6qWXXnIPze6tOjxsTXguICCASZMm8e2337J//36GDR+Lf6C2u4slhOjB9mXUU13p7NRzBgZrSRrb9kx03lwPfcOGDfzzn/8E4LbbbmPevHk89dRTHVoPfdmyZdhsNmbMmEF8fDyLFy9m/vz5TJo0iV27drFy5UrefPNN9uzZg81mY9asWe6FU+bNm8fvfvc7kpOTiY2N5Z577iE9PR2TycT7779PWFjYReO5b98+lixZgs1mY9CgQbz88stYLBbee+89PvzwQ3Q6HbGxsfzpT3/ihx9+4JlnngGaKnqffvppm++rvdpdQ584cSJ/+MMf2L17N0VFRezevZsXX3yRiRMnelSAy9WoUaMYMCCS0uptfLtpb3cXRwghzuPN9dBLS0vp169pidh+/fpRVlYGdGw99CeffBKTyURaWhpvvvkmAIcPH2bevHls3LiRqKgoFi9ezPr160lPT2fbtm3s37//vPPU19czduxY0tPTmTBhAh9//HG73sMjjzzCU0
89RXp6OsOHD+eVV14Bmm5wNmzYQHp6unuO+rfffptly5aRlpbGZ5995l7B1BPtrqHPnz+fNWvW8N5771FRUUFISAiTJk2SeX4vkaIo3HLLzaxetZbDx7bw3WYHU6aO7+5iCSF6qAvVpLuSN9dDb01H1kNvTVRUFFdccYX757Vr1/Lxxx/jdDopKSkhNzf3vDVJDAYDM2bMAGDkyJFs2bLloteprq6mqqrKXcm97bbb+M///E+gabGahx56iNTUVFJTUwEYP348zz33HHPnzuWGG25oc372jmh3DV2n03H77bfzxhtv8NFHH7FixQpuvfVW1q5d63EhLld6vZ5b591MgO9gMnb/wHfffSePNoQQPYq31kO3WCyUlJQAUFJSQmhoKNDx9dDPdfbCJgUFBbzzzjusWrWK9PR0rrvuOmw223nH6HQ6FKVpem6tVutxxfVvf/sbd999N3v37iU1NRWHw8FDDz3Eiy++iM1m46abbiIvL8+ja0AHEnprmt/wxTQ0NPDEE0/w+OOP8+ijj/LJJ5+ct4+qqqxcuZKHH36YRYsWtVgbty8zmXRMvXomAT7xZGRk8N1333V3kYQQws1b66HPnDmT1atXA7B69Wquv/56oOProev1ehobG1vdVlNTg4+PD4GBgZw+fZpNmzZd5N23X2BgIEFBQWzfvh2ANWvWMGHCBFwuFydOnGDy5Mk8/fTTVFdXU1dXx9GjR0lISODBBx8kOTm5UxK6VwaQ6/V692IuDoeDZ555htGjR7foUZiZmUlxcbH7Q/Duu+922py/Pd3gYUaOHLqKUxUqu3fvJiEhAYvF0t3FEkIIwDvroT/44IPcf//9/M///A+RkZG88847QMfXQ7/rrrtISUlh5MiRLF68uMW2xMREkpKSuOaaa4iOjnZPY95ZXnvtNXenuOjoaF555RWcTicPP/wwNTU1qKrKfffdR1BQEC+++CJbt25Fo9EQFxfHNddc4/H1L7oeenOzSWscDgfPP/88q1atavcF7XY7zzzzDPfee2+LHpB//vOfGTFiBFOmTAFg4cKF/P73v2+z80OznrweekcU5jewa1sFxZWfE94vjLlz57a7BaSn6QvrUHc3iaHn+kIMZT303s+b66FftIbefEfWlvbWJF0uF4sXL6a4uJjrr7++RTKHpo4TZ58rNDSU8vLyiyb0viJqkJ7Dh3xodI2hqGgbR44cYejQod1dLCGEEL3ERRN68+B+T2k0Gl588UXq6up46aWXKCgoIDo62r29tYaC1mqo6enppKenA7B8+fJObZrW6XTd2tQ9abofX3/uIDAgl61btzJ27Fj0en23ledSdXcc+wKJoef6QgxLSkq6fWrtzr7+kiVL2LFjR4vX7rvvPn7+85936Dypqak0NDS0eO3NN9+8YHO8J1599dXzOoHfdNNN/Pa3v73osZ7E0Gg0tvtzfNEm966wevVqjEYjN998s/u1y73JvdnO7+vIP3qMk2XpTJo0iXHjxnVreS5FT4hjbycx9FxfiKE0ufd+3mxy98pSX829+qCpx3tWVhaRkZEt9hk3bhybN29GVVVycnLw9fW9bJrbzzZitA++xgGEBEfz448/XnCYhxBCCNHMK205FRUVvPXWW7hcLlRVZeLEiVxxxRXuIQ8zZ85kzJgxZGRksGDBAgwGAw888IA3itbj+PppGDbcRPaesVQ6v+Srr75i+PDhDBo0iODg4O4unhBCiB6qW5rcO1Nfa3IHcDpUNq2vpro+l2rrPqqrq4GmcY5XXHHFBadc7Al6Shx7M4mh5/pCDKXJvffrUb3chfdpdQqJY3zY+f0wpk5KIiTcxrFjxzhw4ACbN28mNja2U+b9FUII0Xd45Rm66LiISD2WfjoO7LWhIYDk5GSuvfZanE5ni9mXhBCiq/TG9dA76qqrrqK8vLzN7ecOse7JJKH3UIqiMOYqX/R6hR+/r6OhwUVYWBihoaEcOHCgu4snhLgM9Mb10C9n0uTeg5l8NI
yb7MfWTbVkbqvnyqv9SEhI4LvvvqOiouKyHAUgxOVq8+bNnD59ulPPGRYWxtSpU9vc3tvWQ//rX/9KYWEhTz/9NACrVq0iKyuLpUuX8qtf/YoTJ05gt9u55557mD9/fvuCdIaqqixdupRNmzahKAoLFizglltuoaSkhN/85jfU1NTgdDp5/vnnGTduHI899hh79+5FURRuv/12fv3rX3foepdCaug9XIhFR9IYH06ddJCTbSM+Ph5FUaSWLoTocr1tPfTZs2ezfv16989r1651z3fy8ssv8/XXX7Nu3TpWrlx5wWb21qxbt47s7GzS0tL4xz/+wdKlSykpKeGzzz5j2rRppKWlkZaWRmJiItnZ2RQXF/Pvf/+bb7/9lttvv71D17pUUkPvBQYNNVBZ5iQn206Q2Y9BgwZx4MABJkyYgEYj92RCXA4uVJPuSr1pPfTQ0FCio6PZtWsXQ4YM4fDhw+4FWFauXOlO9idOnCA/P5+QkJB2l2PHjh3MmTMHrVZLWFgYEyZMYM+ePYwePZrHHnsMh8PB9ddfT1JSEtHR0RQUFPD0008zc+ZM94RpXU2yQS+gKAojr/AhyKxl9/Z6YmLiqauro6ioqLuLJoTo43rbeug333wza9euZd26daSmpqIoClu3bmXLli2sXbuW9PR0kpKSsNvtHYpDWyO8J0yYwJo1a4iIiGDhwoWsXr2a4OBg0tLSmDhxIitXrmTRokUdutalkoTeS2h1CldM8kVFpex4OEajUZrdhRBdrreth37DDTewYcMGPv/8c3dze01NDUFBQfj4+JCXl0dGRkYHo9CUuL/88kucTidlZWVs376d0aNHU1RUhMVi4a677uKOO+4gKyuL8vJyXC4Xs2bNYsmSJS1ufrqSNLn3In7+WpLH+7Jraz2WkBgOH851d1YRQoiu0pvWQw8ODiY2Npbc3FzGjBkDwPTp0/nwww9JSUkhJiaGsWPHdjgGN9xwA7t27WLGjBkoisJTTz1FeHg4n3zyCW+//TY6nQ4/Pz9ef/11Tp48yaOPPorL5UJRFJ544okOX+9SyExxZ+ktM0tl7arn4IHjnCxfz7XXXtvhziddrbfEsSeTGHquL8RQZorr/frc4iyic40Y7UOYpR8GXRC7dmVw8uTJ7i6SEEKIbiZN7r2QVqswbrIfp0vHcqpqK6tXr2bgwIFceeWV561iJ4QQPcmTTz7Jjz/+2OK1e++9t8NDu2bPnn1ex7YVK1aQkJDQ4TI198Q/16pVqzrUE767SZP7WXpbE93JogZ2fFeFU5NHWdU+rFYrUVFRTJ8+vVs/hL0tjj2RxNBzfSGGdXV1+Pn5ddv1pcndc57GsLXPgDS590H9owxcMSEIvZrAiGG3MWXK1Zw+fZq///3v7NixA6fT6d7Xbrdz4MABduzYQX19/SVdr6qqio8//tg94YMQomtpNBpJqJcxh8PRoblGpMm9l4sabMDlUtnzo5V+uljuujOOLd9tZtu2be7ZmgoKCigoKMDlcgGQmZnJhAkTGDlyZIc+LJmZmZSVlXHo0CEmTZrUVW9JCHGGyWTCZrNht9tRFMXr1zcajR0ery1autQYqqqKRqPp0MqaktD7gOgYIy4XZO2yoih6Zs5MZfjw4XzzzTds3ryZgICm1dpiY2PR6/Vs3ryZb7/9ln379jF9+vR2PXdvruEDHD16VBK6EF6gKAo+Pj7ddv2+8Niiu3kzhpLQ+4jBw4yoLtiXaWXHljrGTx7M/PnzqampwWw2t7i7nzNnDocPH2bLli2sWbOGm266iSFDhlzw/NnZ2TQ2NhIfH8+hQ4eoqakhICCgq9+WEEKIdpJn6H3IkDgjo6/0peyUg62banE5tYSEhJzXVKcoCsOGDWP+/PmEhYWxYcOGCy5U4HK52LNnD5GRkYwbNw5oqqULIYToOSSh9zEDhxgYN9mPmmon3/+7lvo6V5v76vV6Zs+ejVar5X//93/bfM5z5MgRampqGD16NCEhIQQGBk
pCF0KIHkYSeh8UEalnwjR/7DYX3/+rhvLStnvJBgQEcOONN1JVVcWGDRvcHefOtnv3bgIDAxkyZAiKojB48GAKCwul960QQvQgktD7qNAwHZOvDUCjUdi6qZb8XHubqwVFRkYydepUjh49yrZt21rsV1JSwokTJ0hOTnb3iB88eDAOh0NWexNCiB5EEnofFhisZepMf8IjdOzLsJK5rR6Ho/WkPnLkSBITE9m5cyd///vfyczMpL6+nt27d6PX61sshhAVFYVOpyM/P99bb0UIIcRFeKWXe2lpKW+99RaVlZUoikJKSgo33nhji32ys7N54YUXCA8PB+Cqq65i3rx53ihen6Y3aBg/xY+8A3YO7rNRVVlD8nhfQiwt/+sVRWH69On069eP7OxstmzZ4l6ycNSoUS1WdNPpdAwcOJCjR4+iqmq3jI8VQgjRklcSular5Re/+AUxMTFYrVaWLFnCqFGjiIqKarFfQkICS5Ys8UaRLiuKohA7wkRwiJbdO+r5/l+1RMcYSBhlwmD8qZFGq9WSlJREUlISZWVlHDhwgOPHj7uXIDzbkCFDyM/Pp7y8nNDQUG++HSGEEK3wSkI3m82YzWagaR3dyMhIysvLz0voomuFRei55oZADmXbyM+xU3y8kRHJPkQN1p9Xyw4NDWXKlCltnmvw4MEA5OfnS0IXQogewOvP0E+dOkV+fj7Dhg07b1tOTg6PP/44y5Yto7Cw0NtFuyzo9AqJo324ekYAfv4adu+o54dNtdRUOy9+8Fn8/f2xWCwyfE0IIXoIr662ZrPZePbZZ7n11lu56qqrWmyrr693z1ubkZHBBx98wIoVK847R3p6Ounp6QAsX76choaGTivf5baykKqq5OyvZucPZTgaXSSNNpM8zoxO3777vPT0dL777jsWLVqEv7+/+/XLLY5dQWLoOYmh5ySGnuuKGBoMhlZf91pCdzgc/OEPfyA5OZnZs2dfdP8HH3yQ559/nsDAwAvudzkvn9pZ7DYX+/dYKTraiK+fhuTxPlj66S96XHFxMZ988gkAgYGBmM1mQkJCSElJaXOSmqqqKkwmU4tOduJ8l+tnsTNJDD0nMfRcV8SwreVTvfIMXVVV3n77bSIjI9tM5pWVlQQFBaEoCnl5ebhcLpkr3EuMJg1jrvJj4BAHe3+s54dv6hgSa2D4KB90urZ7sPfr14/Zs2dz+vRpKioqKC8vp7CwkNLSUm655ZbzVnIrLi7mn//8J3q9nuTkZEaPHt2hlYSEEEK0zSsJ/dChQ2zevJno6Ggef/xxAH7+85+771pmzpzJtm3b2LhxI1qtFoPBwCOPPCLDobzMEq5j6vUBHNxrJT+3gVPFDsZc5Ys5tPWPiaIoxMTEEBMT437t0KFDbNiwge3btzNx4kT361arlfXr1+Pn50d4eDg7duwgMzOTUaNGMWbMGHx9fbv8/QkhRF/m1WfoXUGa3LtGaUkju3fUY7WqRA7UMyzBRGCwtl3HbtmyhczMTObOncvAgQNRVZUvv/ySwsJCbrvtNvr160dpaSk//vgjubm5aDQa4uPjSU5Ods9DcLmTz6LnJIaekxh6zptN7trf//73v+/UK3lZTU1Np53L19eX+vr6Tjtfb+brr2VgjBFVhaJjDeTnNlBd6cTPX4PJ58Kd5kaOHElWVhY5OTkMHz6czMxMsrOzmT59urs27+vrS2xsLHFxcU2d83Jy2Lt3L4WFhZjN5jYftzidTlRVPa85v6+Rz6LnJIaekxh6riti2NbfR0noZ5EPb0tarUJYhJ5BQw1otXCioJH83AYqyhz4+Grw8VVafSwSEBBAcHAwu3fv5tixY+Tm5hIfH8/EiRPP29/Hx4fBgwczatQofHx8OHbsGPv27aN///7ndYisqalh9erV7Nq1Cz8/P0JDQ/vsYxn5LHpOYug5iaHnvJnQ+3Y1R3QKg1FDfJIP190USMIoE1UVTrZuquX7f9dScqKx1UVfwsLCuPrqqyktLSUkJI
Rrr732gsnXaDQyduxY7rjjDgICAli7dm2Lxynl5eWsXr2auro6fHx82LBhA2vWrJHmQCGEOEOeoZ9Fnhe1j9OhUpDfQN5BG7Z6lcBgDcOGm+g/UI9Go7jjqKoqBw4cYODAgR0asVBXV8eaNWuoq6tjzpw5KIrCl19+iUaj4ZZbbiE0NJT9+/ezdetW7HY7/fv3x+l00tDQQENDA6Ghodx4441tjtXsDeSz6DmJoeckhp7z5jN0SehnkQ9vx7icKscLGsg7aKe22oWvn4ah8UaSr+hPVXW5R+eura1lzZo1WK1WVFXFx8eHOXPmEBwc7N7HZrOxY8cOSkpKMBgMGAwGtFothw4dIjIykptvvhmdzisDOTqdfBY9JzH0nMTQc5LQO0ASevdTVZWSEw5y99uoLHei0ysMiNIzcIgBs0V7yc+5a2pqWLNmDQaDgVtuuQU/P792HXfgwAHS0tIYOnQoN9xwQ6/sQCefRc9JDD0nMfRcn5tYRvRtiqIQEamn3wAdFaVOTp1QOJJXQ0F+A34BGmLijEQPMaDRdiyxBwQEMH/+fDQaTYeSckJCAjabjS1btrBp06aLPr8XQoi+QBK66DSKohASpiMuwcKwERpOFjVwNK+BrF1W8g7aiRthJGqwAY2m/cn1UpvMx4wZg9VqZefOnWi1WiZMmCCz0gkh+jRJ6KJL6PQKA4c0JfBTxQ4OZdnY86OVvAN2hg43EjnIcMFpZTvDxIkTaWhoYO/evezfv5+EhASSk5MJCQnp0usKIUR3kIQuupSiKPTrryc8QkfJCQeH9tnYu9PK/j1WBg4xMniYAf+A9s1AdynXnj59OomJiezZs4f9+/eTlZXFwIEDGTNmDIMGDZKmeCFEnyEJXXjFuc/Zj+bZOZpnJz/HjqWfjiGxRvr116F0oDm+vcLCwkhJSWHy5Mns27ePvXv38uWXXxIcHMzo0aMZMmQIJSUlHD9+nKKiIqxWK1dffTXx8fGXdL2amhr8/f3lZkEI4VXSy/0s0qOzc7Q3jjari4IjDRw7bMdmVfH10zB4mIGowQaMpq7rme50OsnLy2P37t2UlJS4X9fpdPTv35+GhgZKSkpISEhg2rRp7R7Prqoq27Zt48cffyQiIoJp06bRr1+/SyqjfBY9JzH0nMTQc9LLXVwWTD4a4hJNDEswUny8kfxcO/v32Ni/x4bZoiVigJ6ISD3+gZ3bJK/VaomPjycuLo7i4mKKi4uJiIggPDwcrVaLy+Vix44d7Nixg5MnT5KamnrRRWMcDgdpaWnk5uYyZMgQiouLWbVqFSNGjGDSpEn4+vqiqioNDQ1YrVasVis2mw2bzYbdbkev1+Pn5+f+an69mcFgkBq/EOKCpIZ+Frkb7RyexLG60snJokaKjzdSXekEICBQQ9RgA5GDDPj4em9MeVFRERs2bKCurg6dToevry++vr74+fnRr18/BgwYQHh4OA0NDXz11VcUFxczefJkxo4dS0NDAzt27GDPnj3uJYGtVisul+uSyhIUFMTo0aNJSEjo1TPgeZP8PntOYug5mVimAySh9zydFUdrvYvi440cP9ZARVlTcrf00xHeX4d/gBa/AA2+fpoODYPrcBmsVg4cOEBtbS1Wq5X6+nqqq6upqqoCQKPRoNfrcTqdzJw5k2HDhrU4vry8nIyMDKBpIZqzv0wmEyaTCaPRSGNjI3V1de4vk8lEXV0diqLgcrnIy8ujpKQEo9FIUlISycnJ+Pv7d9n77gvk99lzEkPPSULvAEnoPU9XxLGuxknRsQaKjjVSX/tTLVdRIDBYS2S0nshBhosu7dpZ6uvrOXnyJCdPnqSqqopx48Zd8vPy1pwbQ1VVKS4uJjMzk8OHD6PT6Zg4cSKjRo3qlTPheYP8PntOYug5SegdIAm95+nKOKqqSoNdpa7WRV2Ni9oaJ6eLHVRV/FSDHzBQj6WfDj//rhkO5w0XimFlZSXffPMNBQUFhIeHc+2117b5jL/5ub3dbsfhcO
BwOHA6neh0OkJCQtBqe2+MLkZ+nz0nMfScdIoTog2KomA0KRhNGkIsTa8ljILaGifHz9Tg9+60AuDjpyEsXEdYfx3h/fVdPpGNtwQHB3PLLbeQm5vL5s2bWbVqFf37929RU29sbKS+vp76+nqcTmer59FqtVgsFsLDw4mMjGTYsGHdUtuvq6tj9+7dxMXFERYW5vXrC9FXSEIXfYJ/gJb4JB/iEk3U1rgoLXFQWuLgZFEjBfkNaHXQP6qpWd4SruvS5+7eoCgKcXFxDBo0iO3bt3Pq1KkWHe6MRiNms9ndkc9kMqHT6dBqteh0Oux2O6dOnaKkpISDBw+SlZVFcHAwV155JXFxcZ2e2BsaGtDr9S166rtcLvbu3csPP/xAY2Mj+/fvZ968eZjN5vOOLygoICgoiKCgoE4tlxB9iTS5n0WalzpHT4qj6lIpO+2g6FgjJ4sacDSC3qBgDtViDtVhDtUSHKJDb+hZCd6bMVRVlSNHjrB9+3ZKS0sxm82MGjUKRVFwOBw0NjbicDho/lOhqiparZbw8HD69+/f6ip4jY2NHD9+nIKCAgoKCigvL8fX15f+/fszYMAAAgMD2bFjB6dPnyY6OpoxY8awceNGdDodt912m7vDn9PpZPPmzWRlZaHT6Zg8ebK7bM3q6+vZu3cvAQEBjBgxwr2tJ30OeyuJoefkGXoHSELveXpqHJ1OlZITjZw66aCyzEFNdVONVlEgLELHgIEGIiL1PSK5d0cMVVUlLy+P7du3U17ecj17jUaDoijuL6fT6W4RCAwMxGKx0NjY6B4J0LyOvVarZcCAAfTv35+qqipOnjxJdXU1AH5+fkydOpVhw4ahKAqnTp1izZo1BAQEMG/ePBwOB+vWraO4uJjRo0dTUVHBsWPHiIqKIiUlBa1Wy65du9i3bx8OhwNoWmnvmmuuQafT9djPYW8iMfRcn0vopaWlvPXWW1RWVqIoCikpKdx4440t9lFVlffff5/MzEyMRiMPPPAAMTExFz23JPSep7fEsbFBpbLcwekSBycKGrDWq2g0TR3rzKE6gsxagkO0XTprXVu6M4aqqlJTU4NOp3N/ndsE73Q6OX36tLunf1lZGQaDocVY/QEDBjBgwIDzVsyrra2lrKyM/v37nzemvrCwkC+++ILQ0FDq6upobGwkJSWF2NhYVFVl//79bN68GWhqsne5XMTHxzNu3DhycnLYsWMHFouFWbNmMXTo0F7xOezJesvvck/W5xJ6RUUFFRUVxMTEYLVaWbJkCY8//jhRUVHufTIyMvj666954oknyM3N5YMPPmDZsmUXPbck9J6nN8ZRVVUqy52cKGik5GQjdTU/PY82+SgEmbVnvpoSvclH6dKZ23pjDDtLXl4e69evJygoiFmzZhEaGtpie3V1NVu3bsVgMDB27FiCg4Pd244ePcqGDRsAuO666/Dz8yM0NNR946CqKna7nerqahobGzEajRiNRncfg7b+T1VVpba2luLiYkpKSiguLqa+vp7Y2FhGjBjR4tl+fX09eXl5nD59mqSkpE4dzuhtl/PnsLP0uV7uZrPZ3dHFx8eHyMhIysvLWyT0nTt3MnXqVHdnn7q6OioqKlrtICNEZ1MU5cwzdR2JY3xobFSprnBSWdE0JK6qwknJSQeoTdOxGoxnJ3ktoWG6bqnJ90XDhg3jzjvvJCAgoNVZ8QIDA0lNTW312MGDB3PHHXewfv161q1b5349KCgIvV5PdXU1DQ0NrR6r1Wrx9/fHz8+PgIAAtFottbW11NTUUFNT427W12q1hIWFERAQwM6dO/nxxx8ZOHAgAwcOpKCggOPHj7sfN2RnZ5OQkMDEiRNbTARkt9spLS2ltrbWPRrBbrcTEhJCZGQkFoulxc2F3W6noqKCyspKKisrqaqqoqqqitDQUJKSkggPD2+xv81mo7CwkMDAwF59QyE6xuu93E+dOkV+fn6rM2pZLBb3z6GhoZSXl0tCF91Cr1cIDdcRGv7Tr4
jDoVJd2ZTcm5K9k8OH7KhnKvMBgRos/ZqOCQrW4uOnkfnXL9G5tfKOCAoK4vbbb0ev15OTk0NpaSmlpaU4HA53h7zmBG+327Hb7dhsNqxWK3V1ddTW1nLy5EkcDgcBAQGEhIQwePBgAgMDiYiIwGKxuMfv19TUcODAAbKzsyksLMRsNjN+/HhiY2Px9/dn586dZGZmkpeXx6hRo7Db7e5HFGfTaDQYDAZsNhvQNEqheaGgiooKjYqZCQAAFwJJREFUrFZri/0DAwPx9/fn0KFDZGdnY7FYSExMxOl0kp+fz4kTJ9ydGGNiYpg0aRIhISGtxquxsZGCggKOHDlCfX09QUFBBAcHExQU5J6pUCYv6h28mtBtNhsvv/wyd999N76+vi22tdby39ofw/T0dNLT0wFYvnx5i5sATzV3pBGe6ctxjIho+bPTqVJeaqf4uJWTx60U5lvJz22qAWp1CkHBBoLNegKDDQQG6wkMavoymi48oUtfjqG36HS6Fs3xXcFisTBkyBBSU1Opra0lICCgxd+tyMhIrr76ajZs2MCuXbswmUxERUUxatQooqKiCA4Oxs/PDx8fHxRFobKykqNHj3L06FEKCwvx8fEhISGB0NBQLBYLFosFs9ns7pdgs9nIyspi586dfPvttwD069ePq6++mtjYWPLz8/n+++/5+OOPGTt2LAkJCe4bGJvNRlFREXl5eTQ2NmIymTCbzRw6dKjFwkDNn8Xw8HBCQkIwmUz4+PhgNBrx8/PDYrG0GOngcDgoLCwkNzeXwsJC901TQ0MDjY2N7n4Zer0evV7v7nPR/G9ERARRUVEtWjRUVaWqqopTp05x+vRp901aeXk5AQEB7o6XzTdsBoMBvV7f7huRhoYGd6tHc0dOo9HYYh+n00lpaal7aubmaZtNJtMFJ2jy5u+y13q5OxwO/vCHP5CcnMzs2bPP2/7nP/+ZESP+X3v3HtvUef4B/HsuvsV2El9IQgIs5bK1bG1XFAaDlXYL4rfRVquqLmt3UyYmrUDVdhRU9g9C27ReGKKjCgqqoGWVNg1pA4mqu4iWsUu6DUhpEW3WkLYhQMjFTmI78eX4nPf3x7FPEkhKQkLimO9HMknsY/vNI5znvb+L8ZWvfAUA8OSTT2L79u3XbKFzDD333MxxNHSB3h4d0T4dsYiBaERHLKIjPjD8Y+YqMLv4izPL54p8ChRlMBHczDGcLLkWw3g8DqfTecN6bUKhEGw2GwoLC4fdPzAwgJMnT+K999676nAgt9uN+fPnY8GCBaioqICiKBBCIB6Po6+vD5qmobW1FeFwGKFQCLFYbMT3drlc8Pv9sNlsuHjxIjRNgyzLKCkpgcvlgt1uh91uh6Io0HUduq5bSyKzJw/G4/FhFQmv14tgMIiBgQGEw2FommY9lq18FBcXIxaLobOzc9hzs2w2G+x2+7CzEwCzVyKVSiGVSqG/v9/qGRnK5/OhtLQUqqpalYjRNmmy2WxWgh86TCSEgM1mw9q1ayf1UKVpHUMXQqC+vh4VFRUjJnMAqKqqwp///GesXLkSzc3NKCgoYHc7zTiyIsEfVOEPDv9o6brAQL+5XW1/VEdvWEdPKI1LbeYfKVkGigOK9VyvZ+Q/HDRzuVyuG/r6ow1TFBQUYNWqVViyZAmi0Sjsdrs1GXCkiYCSJFmrFYLBIObNm2c9ZhiGtZVwNhn29PQgHA4jHA6jr68Pt956Kz7zmc9gzpw5405imqahq6vLmnzY3d0Nt9uNxYsXw+/3IxAIwOfzXRVLIQQikQi6uroQj8ethK1pGpLJpFVpyA51ZCsYLpcLs2fPhtfrtW6apqGjowMdHR04f/48dF1HMBjE7bffjlmzZsHj8ViVgVQqZR11nO35SKVSw2J65SqPG2lKWuhNTU3Ytm0b5s2bZ/2ijz76qFV7XrNmDYQQ2LdvH959913Y7XZs2LABCxYsuOZrs4WeexjHsUvEDfSE0gh36wh3mRPwsp9IT6FsbX5TWG
SOyTucN3Z2fT7h/8OJYwwnLu+Wrd1ITOi5h3G8fum0QG8ojWTcgQvnI+gJ6dBSgx9RSQZcLhlurzw4y75YQYGHE/CuxP+HE8cYTlzeLVsjorFRVQnBUhuCQT8qKg0IMXiyXHwgc+s3x+ZbmtJWa16SAadTgrNAhsslo8Ajo7BYQWGxArfnxp4ZT0S5gQmdKIdJkgSPV4HHe/UsWl0XiPaZy+gGYgbicQOJuEBfj472C5qV7GUF8BYOtuYLfWYXvmpjkifKJ0zoRDOUokgo9qso9l/9MTZ0gWjEQKRXN299ZpI//9HgpipujwxvsZnkvUUyvEUK3G4ZElvzRDMSEzpRHpKVwZ3ssoQQVgveSvS9Oi5fGFwOJMvmZDy3R4GimpUGRZVgs0nwFMooLDLH69mFT5R7mNCJbhKSJMFVIMFVIKOswmbdn9aEtV4+GjEQ7dMRjejQ0wK6bnbt6+nB1zGTvgKP15yc5/aa33uLFKgqEz3RdGFCJ7rJqbbBfexHk04LM+H3GYj0mRvn9IZ1XLqgAdlJ+BLg8Ziz7wszM+9dBeaNy+2IbjwmdCK6JlXNjtcPvz+7YU4solv73Ie707h4Xht2nSSbLXsJAKRsb4EMb6FstvYLZXgLMzPyFSZ+ouvBhE5E101RJHgLFXgLFcwePDwRWkpYy+wG+g0k4gYMA4Awx/INAxjoN65K/pIEuL1mcs9O1Css5mQ9orFgQieiSWezS7DZzWR8Ldnu/MG9783Z+e1DJ+spsLruHQ4ZdofZwne5ZRRkbnaHxMl6dFNjQieiaTVad3420Ud6zbH7RNxAMmEm/WRCDNtBL0uWzVPuVBVwe+JwuAx4vGZXfnYCn43r7ylPMaETUU4aTPQj/5lKp4XVpR+PGUilBPS0QDpz09Mywl0pXGwdPp7vcEpwe81WvcMpw+GQYHfIsNklyLK55E/JVAwcTgl2Byf00czAhE5EM5KqDo7fjyS7h7aezmyfG9PRHzUQy5x4192ZRiphjud/GkkC7A4JDqcMp0uC02V2/bsKZHMXv0KzYkA03ZjQiSivKapk7Wt/JSHMNfbJpAEtZSZ3QxfQDUDXBJIJgWTSQDIukEgYSCYE+no0JBPDu/ttdglujwxVlSArgCybX1VVgqpKUFRzeaDdPtgb4HBIcLllKJzVT5OECZ2IblqSJEG1Aart2pP3hjIMgUTcGDaRb6DfgK4LaJp5brihI9P1L5BOj/5aLrdsbtLjMVv6NpuUmVQoQbWZ8wGUTMWA6/np0zChExGNkyxLKHArKHArKJltu+b12Z6AVEoglTTH+1OJ7El6OmJRA70hDZr26adZZw/aya7bV1QJhp7pWTBEJunLcLgkOJ0yJBnW47oOKArMOQNOmbv65SEmdCKiG2ywJ0BCgXv08XbDENA0cwa/lspO7jO3501rZgUgGtER7kpfNdlvvBQVsNvNffrVzFdFlSBJ5rwBCRI83i5IctJaIshd/3IbEzoRUY6QZXNs3eG49rVpTcAQwhyvl80krKdhjfUnEwaEwLCZ+7oOJBMGkkmRWfpnzh1Ia+aKAT0NiMzmPwLA5UtpaKnhswZlGXBmt/R1SJnrM8+5ooNBkoACt7lc0O2R4XTJmWEJgXSmwpKdW2AOM8hmxUflngLXgwmdiGgGMs+zl664D/DYFHi8k/MewWAQ7Ze6hu36l/0+3m+gr9eAJAGyBGsnv6GNd10X6O5MDzvcZ6yyewrYMr0H2XkFkoTMjoOZC7M9CpmbEIAwYA1DSDKgKpJZqVEAh3P4hkSKCqQ1WEsehTFkq2IpU1mSYVWcZMWc15CLvRRM6ERENKrx7Po3EiHM3oD+mLk5kKoOJuhsMk1legpSySHDDGlhDTWkMkMQ8QGzt0Cy/sm8hzHYS5BNwmYilqz5C7punh440oZE10O1ATabWVEQBmAIYZVjaK5XlChW/Z9nSjY0YkInIqIbRpIka/3+6K6vsnA99CEbEg30GzB0Ya4isJkrCSR5sI
JgGOaEQpFp7QsDg0MGmvnV0M0WvCSZrfhsMs/2IDgcTkzV6AETOhER3TQUVcqc8Dc1lYjsBkdTYUoS+p49e9DY2IiioiLs3LnzqsfPnj2LF154ASUlJQCAZcuW4eGHH56KohEREeWFKUno9957L77+9a+jrq5u1Gtuu+02bN26dSqKQ0RElHemZAPixYsXw+PxTMVbERER3ZRyZgz9ww8/xJYtW+Dz+fD9738fc+fOne4iERERzRg5kdBvueUW7NmzB06nE42NjdixYwd279494rVHjx7F0aNHAQDPPfccgsHgpJVDVdVJfb2bFeM4cYzhxDGGE8cYTtxUxjAnEnpBQYH1/ZIlS7Bv3z5EIhEUFhZede3q1auxevVq6+fJnD04lbMR8xnjOHGM4cQxhhPHGE7cjYhheXn5iPfnxCG+vb29EJlFe+fOnYNhGPB6J2mrIyIiopvAlLTQX3zxRbz//vuIRqN47LHHUFNTg3TmPME1a9bg3//+N/76179CURTY7XY89dRTObmtHhERUa6ShLhyO30iIiKaaXKiyz1XcB385GAcJ44xnDjGcOIYw4mbyhgyoRMREeUBJnQiIqI8oGzfvn37dBcil8yfP3+6i5AXGMeJYwwnjjGcOMZw4qYqhpwUR0RElAfY5U5ERJQHcmKnuFxw+vRpvPLKKzAMA9XV1XjwwQenu0g5r7u7G3V1dejt7YUkSVi9ejXWrl2LWCyGXbt2oaurC7NmzcJPfvITHs5zDYZhYOvWrfD7/di6dStjOE79/f2or69HW1sbJEnC+vXrUV5ezhiOw+uvv4633noLkiRh7ty52LBhA1KpFGN4DSMdD/5pn99Dhw7hrbfegizL+OEPf4gvfvGLk1cYQULXdfH444+Ly5cvC03TxObNm0VbW9t0FyvnhcNh0dLSIoQQYmBgQDzxxBOira1NvPbaa+LQoUNCCCEOHTokXnvtteks5oxw5MgR8eKLL4pnn31WCCEYw3F66aWXxNGjR4UQQmiaJmKxGGM4DqFQSGzYsEEkk0khhBA7d+4Ux44dYwzH4OzZs6KlpUVs2rTJum+0uLW1tYnNmzeLVColOjo6xOOPPy50XZ+0srDLHeZ2s2VlZSgtLYWqqlixYgVOnDgx3cXKeT6fz5rs4XK5UFFRgXA4jBMnTuCee+4BANxzzz2M5TWEQiE0Njaiurrauo8xHLuBgQF88MEH+NrXvgbAPAzD7XYzhuNkGAZSqRR0XUcqlYLP52MMx2Ck48FHi9uJEyewYsUK2Gw2lJSUoKysDOfOnZu0srDLHUA4HEYgELB+DgQCaG5unsYSzTydnZ34+OOPsXDhQvT19cHn8wEwk34kEpnm0uW2V199Fd/73vcQj8et+xjDsevs7ERhYSH27NmD1tZWzJ8/H7W1tYzhOPj9fjzwwANYv3497HY77rzzTtx5552M4XUaLW7hcBiLFi2yrvP7/QiHw5P2vmyhA9bBMENxL/mxSyQS2LlzJ2pra4ednEfXdurUKRQVFXFp0ATouo6PP/4Ya9aswQsvvACHw4HDhw9Pd7FmlFgshhMnTqCurg579+5FIpHA3//+9+kuVt4ZKddMJrbQYbbIQ6GQ9XMoFLJqV/Tp0uk0du7cibvvvhvLli0DABQVFaGnpwc+nw89PT0jHoNLpv/97384efIk3nnnHaRSKcTjcezevZsxHIdAIIBAIGC1fJYvX47Dhw8zhuNw5swZlJSUWDFatmwZPvzwQ8bwOo0WtytzTTgcht/vn7T3ZQsdwIIFC9De3o7Ozk6k02k0NDSgqqpquouV84QQqK+vR0VFBe6//37r/qqqKhw/fhwAcPz4cSxdunS6ipjzvvOd76C+vh51dXV46qmn8IUvfAFPPPEEYzgOxcXFCAQCuHTpEgAzOc2ZM4cxHIdgMIjm5mYkk0kIIXDmzBlUVFQwhtdptLhVVVWhoaEBmqahs7MT7e3tWLhw4aS9LzeWyWhsbMSBAwdgGAa++t
Wv4qGHHpruIuW8pqYmbNu2DfPmzbOGKB599FEsWrQIu3btQnd3N4LBIDZt2sSlLmNw9uxZHDlyBFu3bkU0GmUMx+GTTz5BfX090uk0SkpKsGHDBgghGMNxOHjwIBoaGqAoCiorK/HYY48hkUgwhtcw9HjwoqIi1NTUYOnSpaPG7Y9//COOHTsGWZZRW1uLu+66a9LKwoRORESUB9jlTkRElAeY0ImIiPIAEzoREVEeYEInIiLKA0zoREREeYAJnegmV1NTg8uXL093Ma5y8OBB7N69e7qLQTRjcKc4ohyyceNG9Pb2QpYH69r33nsv1q1bN42lIqKZgAmdKMc888wzuOOOO6a7GHlF13UoijLdxSC6oZjQiWaIv/3tb3jzzTdxyy234Pjx4/D5fFi3bh1uv/12AOa+0C+//DKamprg8XjwzW9+E6tXrwZgHo15+PBhHDt2DH19fZg9eza2bNmCYDAIAHjvvffwy1/+EtFoFCtXrsS6detGPKDo4MGDuHDhAux2O/773/8iGAxi48aNWLBgAQCz+3737t0oKysDANTV1SEQCOCRRx7B2bNn8dJLL+Eb3/gGjhw5AlmW8aMf/QiqquLAgQOIRCJ44IEHhu3SqGkadu3ahXfeeQezZ8/G+vXrUVlZaf2++/fvxwcffACn04n77rsPa9eutcrZ1tYGm82GU6dO4Qc/+MGw42mJ8hHH0IlmkObmZpSUlGDfvn2oqanBr371K8RiMQDAr3/9awQCAezduxdPP/00fve73+HMmTMAgNdffx3/+te/8NOf/hQHDhzA+vXr4XA4rNdtbGzEs88+ix07duDtt9/Gu+++O2oZTp06hRUrVuDVV19FVVUV9u/fP+by9/b2QtM01NfXo6amBnv37sU//vEPPPfcc/jZz36GP/zhD+jo6LCuP3nyJL785S9j//79WLlyJXbs2IF0Og3DMPD888+jsrISe/fuxbZt2/DGG2/g9OnTw567fPlyvPLKK7j77rvHXEaimYoJnSjH7NixA7W1tdbt6NGj1mNFRUW47777oKoqVqxYgfLycjQ2NqK7uxtNTU347ne/C7vdjsrKSlRXV1tHYL755pt45JFHUF5eDkmSUFlZCa/Xa73ugw8+CLfbjWAwiM9//vP45JNPRi3frbfeiiVLlkCWZaxatepTr72Soih46KGHoKoqVq5ciWg0irVr18LlcmHu3LmYM2cOWltbrevnz5+P5cuXQ1VV3H///dA0Dc3NzWhpaUEkEsHDDz8MVVVRWlqK6upqNDQ0WM/97Gc/iy996UuQZRl2u33MZSSaqdjlTpRjtmzZMuoYut/vH9YVPmvWLITDYfT09MDj8cDlclmPBYNBtLS0ADCPBC4tLR31PYuLi63vHQ4HEonEqNcWFRVZ39vtdmiaNuYxaq/Xa034yybZK19v6HsHAgHre1mWEQgE0NPTAwDo6elBbW2t9bhhGLjttttGfC7RzYAJnWgGCYfDEEJYSb27uxtVVVXw+XyIxWKIx+NWUu/u7rbOWg4EAujo6MC8efNuaPkcDgeSyaT1c29v74QS69Czow3DQCgUgs/ng6IoKCkp4bI2oiHY5U40g/T19eFPf/oT0uk03n77bVy8eBF33XUXgsEgPve5z+G3v/0tUqkUWltbcezYMWvsuLq6Gr///e/R3t4OIQRaW1sRjUYnvXyVlZX45z//CcMwcPr0abz//vsTer2PPvoI//nPf6DrOt544w3YbDYsWrQICxcuhMvlwuHDh5FKpWAYBs6fP49z585N0m9CNPOwhU6UY55//vlh69DvuOMObNmyBQCwaNEitLe3Y926dSguLsamTZussfAnn3wSL7/8Mn784x/D4/HgW9/6ltV1nx1//sUvfoFoNIqKigps3rx50steW1uLuro6/OUvf8HSpUuxdOnSCb1eVVUVGhoaUFdXh7KyMjz99NNQVfPP1jPPPIPf/OY32LhxI9LpNMrLy/Htb397Mn4NohmJ56
ETzRDZZWs///nPp7soRJSD2OVORESUB5jQiYiI8gC73ImIiPIAW+hERER5gAmdiIgoDzChExER5QEmdCIiojzAhE5ERJQHmNCJiIjywP8DmFio5PQqsnoAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAEJCAYAAABi2tVNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeXSU5dn48e8ze/ZlspFAWBLCDrIrsggEUECWvljaX62nVVtrN6q1r2Cx1h7bUmvVYrWv9aXU81ZPLValKFJAQZbIvhsCWSAkgSRkT2afeZ7fHyMjMQlMyB6uzzk5JDPPcs+dIdfc66VomqYhhBBCiB5N19UFEEIIIUTbSUAXQgghegEJ6EIIIUQvIAFdCCGE6AUkoAshhBC9gAR0IYQQohcwdHUB2urixYvtdq24uDgqKira7Xo3K6nHtpM6bDupw7aTOmy7jqjD5OTkZh+XFroQQgjRC0hAF0IIIXoBCehCCCFELyABXQghhOgFJKALIYQQvYAEdCGEEKIXkIAuhBBC9AI9fh26EEII0RE8bg1V0zCbr9/21TQNl1Ojod6Hx63hdml4PBqF+kr6pWnodEqHl1cCuhBCiB7B69XQ60FR2h4cnQ6V6kovbpeGTq+g14NOpwQer6n00VCvAhAWriMmTk9snIHQcB0+L3g9Gl6vhsOuUlfjo7bah8upNbmPonPSJzUSk1kCuhBCiJuQy6VSU+kPlP4vLw67ht4A4RF6IqJ0hEfo0TRwOVXcLg2XS8Pr0fD5NFQf+HwaBoOCJVSHJUQhJESHw6FSXeHDblNbvLfJrBBj1ZPS34ROB9WVPsoveSk+72lyrKJARKSOhCQjUTF6wqN0mEw6jCYFk0khMSmOysrKjqyqAAnoQgghOoTToVJT5aOmyktNlQ9bvYolVCEsXE9YhI7QMB2KApoGmupvgddW+aiq8AZaxwBhETpirAZSB+lxu1Tq61Qqyr4IsEajgsmiYDIrmC0KOr0OvR70egWPR8NpV6m67MPp1DCZFGLiDAxINxETZyAkVIfq01BV/wcAk1khJFTXpBdA0zTsDSoOh4rBoGAwKhgMCkaTgl7fcuu7PXoTgiUBXQghRNC8Xu2qLmaViCg9kdF6wsL9wbmmykdpiYeyix7qa/1BWVEgIkpPtFWP065SfsmD61zT7mkAo8nfOu47wERsnIGoGD0GY/NB0evR0OlAd42AejVN0z4vT+uDrKIohEXoCYvQt/rcziIBXQghehGXS6XkvJui8x48Hg2LRcES4u9yNlt0mC3+f01mBUXxT/xyfz6JS1U19Hrl8y+oraynvNyJ067isKvY6tVGLeer6XSgNyh43BqKArHxBoaP8beCI6P1GAyNg6jHo+H4vNtb0fmDvk6nEBKqBB1wWwr0LenM1nJXkIAuhBA9mKpq2G3+iVkXL3govehBUyE6Vk9snB6XQ6O+1sflMhVv0yHg67AD/lZzSIhCWKSO5FQTUTF6omL0mMwKDXUqdbU+6mt8uF0acUkGEvoYMJmuPTPcaFQwRnff1m5PJAFdCCG6KbdbpbrSR3WFfwza59PQKQqKzt8idtj8LWb180azyawwMN1Mv4EmIpsJlj6vf+KYy6kGZmRfmbxlMivodOD7fDKZ6oOY2BicztprtoSvBHfR9SSgCyFEF3PYVaoqvNgaVBw2f/e2vUHF1tB4DNpoUvD5NDQvqCpYQnXE9zESEemf9R0Zpb/meLLeoBBqUAgNa7n1bLzq++gYExUVvbubujeRgC6EEB3A69Ww1avYGvyzux12Fb1B8Xc1m5TABLLKcm+jJVRXZllHRuvpO9BErFVPdKyh1ePF4uYjAV0IIW6QpvkndtXXqTTU+TcisTWo2Op9OB2NZ3EbTQqqT8
Pna/yYNd7AwAwz1ng94RF69AYJ3OLGSEAXQoireNwa1VVe6mt9OO0aToeK06HidmvoFFB0/ta1qoKt3tcoQJvMCmHhOuISDYRF6AkP1xEWoSMs/IulV6rPvyWoz0erZnQLcT0S0IUQvZ6m+YOopn6+iYnmX8Pscqo4nRouh39yWXWlN7B2GkCnh5DPl3yFR+rRtC+uoShgTTATEakjIlJPeKQOUxB7fuv0CuYg100L0RoS0IUQvZbXq1F0zk3+GVdgzXNLjEaFaKue5H4mYqz+mdv+sW4JvqJnkIAuhOiRAjuWVfl3LdPQCA3TExqmIyRMR/G5Kj47XofbpRFj1TMw3YJOr3y+gYl/xrfZomCx6DBbdBiMvX/jEdG7SUAXQvQIXq9G1WUvl8u8VJR5qKtV4fN5Z1fWUDsdjXdOSUw2kDbUQmycXoK16PUkoAshui27TaWsxL/7WdVlL6rqb13HxBnIGG4kKsa/17clxN817vP501nabSrJKVa8vrqufglCdJpOC+jHjh1j/fr1qKrK7NmzWbJkSaPnP/vsM5599lkSEhIAmDx5MsuWLeus4gkhuoGrd0Yru+ihrsY/7h0eqWNAupn4JAOx8YYm+4JfodcrhEf4l3/5N0XpzNIL0bU6JaCrqsq6detYvXo1VquVVatWMWHCBPr27dvouGHDhrFy5crOKJIQohtwu9TPu9C9/pSZdZ9PXFMg1qpn+BgLiSlGwrtxhishuotOCeh5eXkkJSWRmJgIwJQpUzh48GCTgC6E6HlU1Z/84+q11lfYGnwUn3dTdtGLTucf6zaZ/bmqqyv9k9nAP8M8Jk5PSn/ZGU2IG9UpAb2qqgqr1Rr42Wq1kpub2+S4s2fP8rOf/YyYmBi++c1v0q9fvybHbN++ne3btwOwZs0a4uLi2q2cBoOhXa93s5J6bLueUIder0peTj2njlZTX+cFIMZqIj7RQkSUkaJzNspLnQAk9rGg1ys4HT7qa324XSqxcWbGTooiuV8ocQlmdLr2DeA9oQ67O6nDtuvMOuyUgH4lqfzVvjzjdODAgbzyyitYLBaOHDnC73//e9auXdvkvMzMTDIzMwM/V7TjIFlcXFy7Xu9mJfXYdt25Dh12lZILbgrOuHA5NaJj9YyZGILDrlFd6eVcbj0ej0Z4pI5hoy2k9DcREtrShisq0EBVVUO7l7M712FPIXXYdh1Rh8nJyc0+3ikB3Wq1UllZGfi5srKSmJiYRseEhoYGvh83bhzr1q2jrq6OyMjIziiiEOIqXq+Gz6t9/q8/iF8u83K51BMY545LNDDuVjPWBEOjD+iapuF2aZjMsimL6N18Ph9ut5uQkJCuLgrQSQE9LS2NS5cuUV5eTmxsLFlZWfz4xz9udExNTQ1RUVEoikJeXh6qqhIREdEZxRPipuX1alRe9lJT6f08M5j/y+Nu2qum04M13kDqIBMJSUYiopqfqKYo/g1bhOjNLl26xLZt26ipqWHAgAHccsst9OvXr0s/xHZKQNfr9dx///38+te/RlVVZs6cSb9+/di6dSsAc+fOZd++fWzduhW9Xo/JZOInP/mJfLoXop1caTX7E434d1i7XOalusK/thsgJExHWLiO5H5GQsN0GAwKeoOC3gAmk0KM1SCZwES7KS0tJT8/n6SkJFJTUzEajdc/qRW8Xi/l5eVERETcUOPQbrdz9OhRbDYbgwYNon///hiNRrxeL/v37+fIkSOEh4czduxYcnJyeO+994iJiWHYsGFERUURHh5OeHh4k97ojqRozQ1w9yAXL15st2vJeFH7kHpsu/aoQ9WncanEQ2Gei6pKH9qXtjKPjNYRn2gkPslATFzLa7t7KnkfNuZ2u3G5XJjNZoxGY1ANppbqUNM06urqcDqdJCQkBN340jSNwsJCDh8+TElJSeBxg8FA//79SUtLIyUlhfDw8FY36DRNo7S0lMLCQkpKSigtLcX3eSq8/v37M2LECAYOHIher8fhcFBaWkpZWR
k6nY7ExEQSExOxWCzY7XYOHz7MyZMn8fl8mM1mnE4ner2e/v37U1tbS2VlJcOHD2fatGmYzWa8Xi+5ubmcOHGCsrKyRuVSFIUHH3ywXbvlu3QMXQjReewNPgoL3FwocON2aYSG6Rg02ExIqA5LqH/v8tBw//7lovdzOp0cO3aMY8eO4Xa7AdDpdFgsFhISEpg4cSJ9+vS55jV8Ph81NTWUlpZSUlJCSUkJ9fX1AMTHxzN+/HjS09PR6Zp/T9XU1FBQUMDp06eprKwkPDycadOmMWzYMC5fvkxeXh4FBQXk5+cDEBYWFgiy8fHxxMXFERYW1myQ93q9nDlzhuPHj1NRUYGiKMTHxzN69GiSk5O5fPky2dnZbN68mZCQEIxGI3V1/h0EFUVpNGk7OjqahoYGfD4fQ4YMYdKkSURGRnLx4kXy8/PJy8tDURTuvvtuBg4cGDjPYDAwbNgwhg0bhsPhoKGhAZvNRkNDA6qqYrFYWvEbu3HSQr+KfKJvH1KPbdfaOtRUjbJLXgrzXZRf8oLi38d8QJp/d7WbcfiqN70Pq6qqOHr0KIMHDyY1NTWoc74cyNPS0ujfvz8ulwun04nD4aCgoACn00n//v2ZNGkSSUlJ2Gw2KioqqKyspKGhgZKSEqqqqlA/H5uxWCz07duXlJQUdDodR48epaamhsjISEaMGIHZbA6UwW63U1BQEPg9JCQkMGbMGDIyMtDrG8/B0DSN8vJySktLA63nmpqawPMWiwWr1YrFYsFgMGAwGNA0LfAarFYrY8aMYfDgwY3KAP7NzS5cuMDp06dRVTWwL0pCQkKj+5aVlWGxWJgwYQLR0dFN6vRKuGzN/6fOnOUuAf0qvekPQFeSemy7a9WhqmrYG76YwGZv8HGpxIPTrmG2KPRPM5E6yHyNpWI3h97wPqyvr2f//v2cPn0aTdPQ6XQsXLiQAQMGXPO8vLw8Pv74Y5xOJ2lpaUyePLnZtdBut5uTJ09y+PBhnE4nJpMp0IoHiIyMJCYmhri4OKxWK/Hx8cTGxjYKaKqqUlBQwOHDh5t0N4M/+KSlpZGWltbqVUtOp5PKysrAB4zKykpcLhderxefz4fP5yMlJYUxY8aQkpLSLT+4SkBvBQno3Y/UY9t9uQ41TaOqwr/r2qUiDx7PF/9t9QaIsRron2YiKcXY7hu09FTd7X3o8/koLy+nuLiYkpISysvLCQ8PDwTL2NhYVFUNtKBra2vJzs5G0zRGjx7NqFGj2LJlC5WVlS0GdafTySeffMKZM2dISEhg9uzZxMfHX7dsbrebU6dOUVNTg9VqxWq1EhcXR9++fYOuQ03TcDqdjbqw9Xp9k9byzabXrUMXQrSex61RVemlstzLpSIPdpuKXg9JfY3EJxoJi/DPSpf13l3P7XaTn5+P3W7H6XTicrlwOByB4Hyli/vKJC2r1cqgQYOw2WwUFRWRk5PT5Jo6nY6MjAxuvfXWQMt26dKlvPvuu7z//vuBoK6qKnV1dVy6dImsrCwcDgeTJ09mwoQJTbq1W2IymRg3blyb6kBRlG6zHvtmJQFdiC5UVeEl56QTj1vDaASDUcFgUHDYHVRVukADRQFrgoGMERb69DXKHudB0DSt2R0qb4Tdbqe4uBi3282QIUOaLK+qrq7m/fffp7q6GvhiwpnZbCYkJISIiAji4+MJCQkhKSmJlJSUJoHP6XRSXV2NXq8PnGsymZp8ULNYLI2Cenx8PJWVlXi9/q13rVYrd999dyBrpbi5SEAXogu4nCqnTzgpOufGEqIQFaPH49Fw2DW8HpWoaAMZw81Y4w1EW3vfkrKOpGka77zzDg0NDYwfP55hw4Zdt6WqaRp2u52GhobAV1VVFcXFxYFADXDgwAFuv/12MjIyUBSF/Pz8wP4ZixYtIjk5OeglYVezWCzXnWl+9bFLly5l+/btuN1uRo4c2WiMu6WZ5qL3kzH0q3S3Mbee6mavR7dLxd9g8v/X0jTwejQ8bg23W8NWr5Kf48
Lr1UgbYmbwcEuTVvfNXofXc+rUKWJiYkhJSWny3OnTp9m2bRuxsbFUVVURERHBhAkTGDp0KAbDFzP+XS4XFy5coLCwkMLCQmw2W6PrGI1GkpOTAzO6vV4vu3fv5vLly4FZ0sePHycxMZH58+f3yp0t5X3YdjKGLkQP5PVo5Jx0cC7Xfd1j4xIMjBwfQkTkzZfn2+12U1JSQnV1NTU1NdTU1FBXV0d0dHQgeCYkJLTYqi4sLOTjjz/GZDLx9a9/naioqEbX3rt3L4mJiTz88MMcPXqUffv2sWPHDnbs2AEQWPLkcrnQNA2z2UxqairJyclEREQEdvgKCQlp0tJevnw5OTk5ZGVlUVpaysiRI5k+fToGg/wpFV1P3oVCtIPySx5OHLLjsGv0TzMRHesPRlfigcGoYDTpMJkUTGb/Xuc9eSJbbW1tYIIX+CdVhYeHt3h8VVUV586do7CwkIsXLzZa0xwVFUVCQgJVVVVkZWUB/tbx1KlTGTVqVKPruN1uPv74Y6Kjo7Hb7WzZsoVly5YFgv/Bgwex2+0sXLgQnU5H//79SU1NpaioiLKyMrxeb+DLYrHQv39/kpKSgu6m1ul0DB8+nPT0dKqqqkhKSmpVvQnRkSSgC9FKLpeKw6bicmo47CqVl71cvOAhPELH7bPCiI3vvf+tNE1j165dHD9+vMlzs2bNYuTIkU0ez8nJCeRtsFqtjB07ltTUVOLj45vsoGW32ykpKeHUqVPs2LEDs9lMRkZG4PmsrCzq6+u55557sNlsbN68maysLKZNm0ZNTQ1Hjx5l2LBhjQKtoiikpqYGvSFLMEwmkwRz0e303r88QrQjTdMov+Sl4KyLijJvo+d0Ohg83D8Wrtd3/1a3qqo4HA5CQkJaNYFK0zQ++eQTTpw4wciRI+nbt2/guezsbHbs2EFoaCiDBg0KPH7+/Hm2b99O3759mTNnznXHmUNDQxk8eDADBw7kvffeY+vWrVgsFlJTUykpKeHEiRPccsstgQlko0aN4ujRo/Tt25eTJ0+i1+uZMmVKK2tEiN5BAroQ12C3qZSWeDiX68LeoGIJUcgYYSEyWoclxP9ltihdupmL1+tFVVVMJlNQx2/fvp2cnBwURSEsLIywsLDAtpktbUJydTAfO3YsU6dObTRkMGDAAN59910+/PBDli5dSnJyMqWlpWzevJnY2FgWLFjQqg1GDAYDd999N2+//TYffPABixcv5qOPPiIyMpJbb701cNy0adO4dOkSW7ZswePxMHXqVMLCwoK+jxC9iQR0Ia7i9WiUXfJQUealotyLvcE/1htj1TN0VCh9+navndh8Ph9vv/025eXlREZGEhcXR1xcHIMGDWp2LfKVTUwyMjKIiooKLNHKzc0lOzub1NRUxo8fT9++fQOJKzweD1lZWS0Gc/B3QS9atIgNGzawadMmZs+ezccff0xoaCiLFy++od3CzGYzixcvZsOGDfzrX/9C0zSWLFnS6IOLwWDgzjvv5B//+AfR0dGMGTOm9ZUoRC8hy9auIks02kdPq0dN06ip8nGhwE3JBTc+LxiMYI03EJdoJD7RQERU585GD7YOP/30Uw4ePMioUaNwOBxUVlZSU1ODXq/nnnvuadTi9vl8vPnmm/h8Pu69995GM7NdLhcnTpzg+PHj2O12wsPDUVUVp9MZmMDWUjC/Wl1dHRs2bMBmsxESEsI999zTbJKL1qiqquKdd94hLS2NmTNnNntMZWUlJpOpUZd+T3sfdkdSh20ny9aE6GAet0Z1pZeqCi9lFz3U1fi3VU1ONZE60ESMVY/SjVrizSktLeXQoUMMGzasUaCz2Wy89dZbvP/++yxfvpzQ0FAAjh49SnV1NYsWLWqyzMpsNjNx4kTGjh3L6dOnKS4uxmw2YzabsVgsREdHM2jQoOvOzI+MjGTx4sXs2bOHKVOmtDmYA8TGxvLtb3/7muP9Vqu1zfcRoqeTgC5uGl6vRmGei+Lzbupq/a1ORYGoGD2jxo
eQ0t+EsYdsq+r1etm2bRthYWFMnz690XNhYWEsXLgwMP68dOlS7HY7Bw4cIC0t7ZqZugwGA6NGjWqyXKw14uLiWLJkyQ2f35xg9yQX4mYmAV30ej6vxvl8F3mnXbhdGrHxeoaMtBATpycm1tAj90bPysqiurqapUuXNjs+nZCQQGZmJlu2bGHnzp04nU6AJsFfCNF7SEAXvZbXo1FY4CI/x4XLqRGXaGDICEuPXydeVFTEsWPHGD16NP369WvxuIyMDKqqqjhw4AAAt99+e6/cnlQI4dez/7IJ0Qy3S+VcrotzuW48bg1rgoHxt1mwJrT/2/1KUo/Q0NAO3/nN5XJx5MgRjh49SnR0NLfffvt1z5k8eTK1tbXU1tZyyy23dGj5hBBdSwK66DVsDT7OnXVx4Zx/pnpisoH0YRZi49r3bV5RUUFRURElJSWUlJTgcrkYMGAAM2bMaLSv+BV2u73ZfcFb4nA4qKqqwmazBZKJnDp1ikOHDuF0OsnIyGDKlClN0ng2R1EU5s2bh6ZpPXqrWSHE9UlAFz2apmlUXfZRcNZFaYkHRQcp/YykDbUQGd3+E6lOnDjBzp07AYiKiiItLY2wsDCOHTvG3//+dyZMmMD48eNxuVycPXuWM2fOUF5ezsiRI5k5c2aLQdXlcpGfn8+ZM2coKipq9pjU1FSmTJlyQ7muJZgL0ftJQBc9Sl2Nj6oKL/W1PhrqVOrrfLicGkaTwuDhZgakm7GEdEw+6JKSEnbt2sWAAQOYOXNmo/HoUaNGsXv3bvbv38+JEydwOp1omkZ8fDzp6emcOnUKg8HAtGnTGgVXm83G3r17yc3NxefzERkZyaRJk0hJSaGmpiaQSKRPnz6NtloVQogvk4Auuj2PW6XkgocLBW5qq/0ZvgwGCI/Uk5BkJDZeT0qqCb2h41qh9fX1bN68maioKObNm9dkZnl4eDh33XUXI0aM4NixY8THxzNkyBBiY2PRNI3du3dz7NixwF7jiqKQm5vLjh078Hg8jBgxgiFDhpCUlISiKLKhhxCi1SSgi25J0zSqKnwU5ru4VOxB9UFklI4RY0NISjESEtp56Ue9Xi+bN2/G6/Ved0/y5rJ6KYrCtGnT8Pl8HD58GPB/QDh79iyJiYnMmTOH2NjYDn0NQojeTwK66FbcLpXi824KC9w01KkYjNBvgInUQSaiYvTtHsTLyso4efIk8fHxJCYmEhcX12gXNZ/Px44dOygrK2PBggU3HHgVReGOO+7A6/Vy+PBhdDodt912G+PHj29VxjMhhGiJBHTRLWiqRmGBm9MnHHg9/mQoYyaGkJxqwtBBXekNDQ1s2rQJp9NJdnY2ADqdjujoaNxuNy6XC4/HA8CkSZNIS0tr0/0URWH27NkkJCSQnJzcYmYzIYS4EZ0W0I8dO8b69etRVZXZs2e3uDVkXl4eP//5z3nkkUcapUkUvVdttZcThxzUVPmISzQwfIyFqJiOfWte6Ub3eDx8/etfx2QyUVZWRmlpKTU1NYE9zC0WC5GRkWRkZLTLfXU6nWQEE0J0iE4J6Kqqsm7dOlavXo3VamXVqlVMmDChyaxdVVV54403ZAOMm4TDrpKf4+RcnhuTSWHs5FBS+hvbrVu9pqaG/fv3U19fz8SJE+nfvz/wRW7v0tJS5s+fH0jsERERQXp6ervcWwghOlunBPS8vDySkpJITEwEYMqUKRw8eLBJQP/www+ZPHky+fn5nVEs0UXqa33k5TgpKfR3Z/cfZGLoaAsmU/uMJdtsNg4cOMBnn32GTqcjJCSEjRs3MmDAAKZNm0ZxcTGfffYZEyZMkAAuhOg1OiWgV1VVNUpvaLVayc3NbXLMgQMHeOqpp/jzn//c4rW2b9/O9u3bAVizZg1xcXHtVk6DwdCu17tZtVSPDXUe9u+t4EKBDYNBYeioKEaOiSY88vo7ngXr2LFjbNq0CZ/Px4QJE5gxYwYhISHs27ePTz75hDfeeAOAwYMHs3Dhwm47IU3ei2
0nddh2Uodt15l12CkBXdO0Jo99uVv1b3/7G9/4xjeu+wc2MzOTzMzMwM/tuVZX1v62jy/Xo6ZqnMtzk3PSAUDGCDMDBpsxmxWc7lqc7VTl1dXVbNy4kcTERDIzM4mOjsblcuFyuRg6dCipqans27eP6upqZs2aRVVVVfvcuAPIe7HtpA7bTuqw7TqiDpOTk5t9vFMCutVqpbKyMvBzZWUlMTExjY7Jz8/nj3/8IwB1dXUcPXoUnU7HpEmTOqOIooPU1fg4ftBOTZWPhD4GRo0PJTSs/VvFqqqybds2DAYDd911F2FhYU2OCQ0NZdasWe1+byGE6A46JaCnpaVx6dIlysvLiY2NJSsrix//+MeNjnn55ZcbfT9+/HgJ5j1YTZWX/BwXF4s9mEwK424NJTm1/Sa8fdmxY8coLS1l7ty5zQZzIYTo7ToloOv1eu6//35+/etfo6oqM2fOpF+/fmzduhWAuXPndkYxRCeoKPdyOKuEi0UODEZIH2ombYgZk7n1rXKv18vOnTsxm81MmTIFvb75ZCvV1dV8+umnDBw4kCFDhrT1JQghRI/UaevQx40bx7hx4xo91lIg/8EPftAZRRLtyOfTOH3cwblcNyEheoaNttA/zYzRdGMtcrfbzfvvv09xcTHgH6a56667mmy7enVX+6xZsySrmBDipiU7xYk2szf4OPypf5x8YIaZqTNTqKm58QlnDoeDf//735SXlzN37tzA9qsbNmxg0aJFREZGomkaly9f5sSJE9LVLoQQSEAXbXSp2M3xAw40NCbcHkqfviYMhhuf9FZfX897771HXV0dCxYsYNCgQQBERkbywQcf8M9//pP09HTOnTtHfX09iqIwcuRI6WoXQtz0JKCLG6JpGrmnXZw56SQqRs/4KaGEhTc/xn0tXq+X0tJSSkpKKC4uprS0FJ1Ox+LFixttPNSvXz/uueceNm3axKlTp0hNTWXy5MkMHDiQkJCQ9nxpQgjRI0lAF62mqhonDzu4UOAmpb+RMRND0eubjl37fD6OHDmCx+MhOjqa6OhoIiMjqampaRTAfT5/jvP4+HhGjx7N8OHDG21EdIXVauW+++7D5/NhNLbfZjRCCNEbSEAXreL1aBzKsnG51Mvg4WaGjLQ0OxFN0zQ++ugjcnJyUBSl2c2FrgTwlJQUkpOTsVgs172/Tqfrtru7CSFEV5KALoJWW+3l2AE79bUqY2NXT5oAACAASURBVCaGkDrI3OKxe/fuJScnh1tvvZXx48dTX19PTU0NtbW1REREkJKS0mTGuhCieZqm4XQ6UVW1U1dylJWV4XK5Ou1+vdGN1qGmaeh0OiyW5htNzZGALq7LblM5c9JBcaEHo0lh0rQwEvq03OWdlZXFkSNHGD16NBMnTkRRlECXuxCi9ZxOJ0ajEYOhc/9kGwyGFvd/EMFpSx16vV6cTmfQ84QkoIsWeb0auZ85KTjr/3SZPtRM+jAzxs+zommaRm5uLqqqBnKHV1RU8PHHH5Oens706dNlXbgQ7UBV1U4P5qLrGQyGVrXu5R0imlVd4eXofju2BpW+/Y0MGRXSZA/2wsJCtmzZ0uTcAQMGMHfuXBnrFqKdyAfjm1drfvcS0EUjqqpx9jMnuaddhIQo3DYznLiE5t8mn332GRaLhWXLluF2u3E6nXi9XsaNG0ddXV0nl1wIIW5uEtBFQH2tj6P77dRW++g3wMSIcSEYjc1/OrTb7Zw7d44xY8YQGxvb6DmTydQZxRVCCHEV6RMVaKpG3mknu7bW47CrTLg9lFsmh7YYzAGys7NRVZURI0Z0YkmFEF1h2bJl7Ny5s9Fjr732GqtWraKgoID77ruPKVOmcOedd7Js2TL27dsXOG7Hjh0sWLCA6dOnM2fOHL73ve9RUlLS4r2effZZMjMzmTNnDl//+tcpLS0FwOPxsGLFCmbPns2MGTN46aWXrlnmtWvX3tBrfeyxxzh79uwNndvVpIV+k2uo83HsgJ3qSh9JfY2MHh
+C2XLtz3mapvHZZ5+RnJzcpHUuhOhY6j9eQys6167XVPoNRPe177T4/OLFi9m4cSN33HFH4LGNGzfy5JNPct999/GLX/wikGwrJyeH48ePc+utt5KTk8Pq1av529/+xuDBgwHYunUrRUVFpKSkNHuvhx9+mP/+7/8GYN26dbzwwgv87ne/4/3338ftdvPRRx/hcDi44447WLJkCf369Wv2Oi+99FKTNN3g//t1ZUlYc5577rkW66G7C6qFXlhY2NHlEF3gYpGbT7bW01CvMu7WUCZMCb1uMAcoLi6mtrZWWudC3CQWLFjA9u3bAzOui4qKKCsro6CggPHjxzfKnDl06FCWL18OwMsvv8yPf/zjQDAHf5bNW2+9tcV7RUREBL632+2BSWGKomC32/F6vTgcDoxGI+Hh4c1e4ze/+Q1Op5M5c+bwwx/+kKKiImbMmMGqVauYN28eFy9eZOXKldx1113MnDmzURBftmwZx48fB2Dw4MGsWbOGzMxMFi5cyOXLl1ss99atW1m4cCFz585l+fLlgWNtNhuPPPIIs2fPJjMzkw8++ADw91zMmzePzMxMvvrVr7Z43dYIqoX+q1/9itjYWKZNm8a0adOIiYlpl5uLrnOp2M2RT+1Ex+qZcHsYlhAdPp+P4uJidDodISEhmM1mQkJCmiyX+eyzzzCbzY3+kwohOse1WtIdJTY2lltuuYWdO3cyb948Nm7cyKJFizhz5gyjRo1q8byzZ8/yve99r9X3W7NmDW+//TaRkZFs2LAB8H+o+M9//sPYsWNxOBz88pe/bDEWPfHEE6xfv55t27YB/g8g+fn5PP/88/z2t78F4PHHHycmJgafz8fy5cvJzs5m+PDhja5jt9sZN24cK1eu5JlnnuGNN97gJz/5SbP3nDRpEps2bUJRFN58801eeeUVnnrqKZ5//nkiIiL46KOPAKipqaGyspKf/exnvPPOO6SmplJdXd3qOmpOUAH9L3/5C0eOHGH37t1s2LCBIUOGMH36dCZPniy7ffVApSUeDmf5g/mtM8IxGBXcbjcffPABRUVFjY7V6XRMnjyZ8ePHo9PpcDgc5OXlMWrUKFkXK8RNZMmSJWzcuDEQ0J9//nn+9a9/NTrmgQce4Ny5cwwaNIj//d//bfRcVVUVy5cvx+FwcO+9914z0K9cuZKVK1fy0ksvsX79eh577DGOHTuGXq/nyJEj1NbWsnTpUqZNm0b//v2DKn/fvn0ZP3584OdNmzbxxhtv4PP5KCsrIzc3t0lAN5lMzJkzB4BRo0axe/fuFq9/6dIlHn74YcrLy3G73aSmpgKwa9cuXn755cBx0dHRbN26lVtvvTVwTHs1koPqctfr9UycOJFHH32UV199ldtuu41///vffOc73+FPf/oTOTk57VIY0fHKLno4lGUjKkbP5On+YO50OnnvvfcoLi5mxowZLF26lPnz5zNr1iwGDhzIp59+yjvvvEN9fT05OTkyGU6Im9Cdd97Jnj17OHnyJE6nk1GjRjFkyBBOnjwZOObKmHdNTQ0AGRkZnDp1CvC38rdt28a9996LzWYL6p5Lly5l8+bNALz77rvccccdGI1G4uLimDhxYqBrPBihoaGB7y9cuMCrr77KW2+9xfbt25k9ezZOp7PJOQaDIdDlr9fr8Xq9LV7/ySef5Nvf/jYfffQRv/vd7wLDE5qmNbuWvCP2FmjVLHen08mBAwfIysqisrKSKVOmkJSUxEsvvdTk05jofsoveTi010ZklJ5bZ4RhNCnYbDb+9a9/UV5ezvz58xkzZgz9+vUjPT2dkSNHMn/+fObMmcPly5d58803OXLkCImJicTFxXX1yxFCdKKwsDBuu+02Hn30UZYsWQL4W+2HDh1i69atgeMcDkfg++9///usXbuW3NzcZp9vTkFBQeD7rVu3kpaWBkBKSgp79+5F0zTsdjtHjhwhPT29xesYjUY8Hk+zz9XX1xMSEkJkZCSXL19mx44d1yxTMOrq6k
hKSgIIDBMAzJgxg/Xr1wd+rqmpYfz48Xz66adcuHABoHO73I8cOcKuXbs4evQoQ4cOZdasWTz++OOB9cZ33nknDz/8MA8++GC7FEq0v7JLHg7tsREeeSWY66irq+Odd97B4XCwePHiZmeLKorCsGHD6NOnD1u3bqW0tPSaE1qEEL3XkiVLePDBB/nzn/8MQEhICK+//jpPP/00Tz31FHFxcYSHhwdmlw8bNoynn36aFStW0NDQQExMDCkpKfz0pz9t8R6//e1vyc/PR6fTkZKSwpo1awD41re+xSOPPMKsWbPQNI3ly5c36SK/2je+8Q0yMzMZNWoUjz/+eKPnRowYwciRI5k5cyapqalMnDixrVXDT3/6Ux566CGSkpIYN25cYPjy0Ucf5b//+7+ZNWsWOp2ORx99lPnz5/Pss8/y4IMPoqoqcXFx/OMf/2hzGRStubyWzRR0xowZ15wQ99FHHzF79uw2F6i1Ll682G7XiouLo6Kiot2u111cCeYRn7fMTWYdHo+HDRs2UFdXx5IlSwKfLK/F5/Nx8eJF+vbte83uot5aj51J6rDtelMd2u32Rl3GncVgMFyzm1lcX1vrsLnffXJycvP3CuaCf/jDH657TFcEc3F9ZRf93exXB/MrucorKipYtGhRUMEc/GNILa35FEII0bWCCujPPfccCxYsYNiwYYHHTp8+zebNm6/ZdSK6VtlFNwf3NBAZbQwEc4CjR49y9uxZbrvtNgYMGNC1hRRC3JSeeOIJDh482OixBx98MLCGPVgLFy5skpFs7dq1jeJVe/rjH//I+++/36QMK1as6JD7tUZQAT07O5tHH3200WMZGRn8/ve/75BCibarrvCy7T+fUt1wirTwQZSVj6Rv374UFRWxd+9e0tLSmDBhQlcXUwhxk/rNb37TLtf5cnDtaCtWrOgWwbs5QQV0o9GI0+ls1I/vdDol8X03VV/rY8+OcqobThEdHUVR0QXy8nKJiorC6XQSGxvLnDlzJCWjEEL0IkEtWxszZgx/+ctfsNvtgH+Qft26ddxyyy0dWjjReg67yr5PGqiqO4qiaCxevIgHHniAuXPnEh4ejsFgYMGCBZIRTQghepmgWuj33XcfL730Evfffz/h4eE0NDRwyy238KMf/aijyydawe1S2bezAZu9ilpbPuPGjSMyMhLw7688dOjQLi6hEEKIjhJUQA8PD2fVqlVUV1dTWVlJXFwc0dHRHV020QqapnFknx27TcWjO4bZbJYxciFEu1i2bBk//OEPG2Vbe+211ygoKOA73/kOv/zlL8nLyyMyMpLw8HAee+yxwH4VO3bs4LnnnqO+vh6z2UxaWhpPPvlki9nWnn32WbZu3YqiKMTFxfHCCy+QlJSEx+Phscce49SpU3i9XpYtW9aujcrJkyfz4Ycf9ugMkq3ajDsmJobo6Gg0TUNVVYAWU9B92bFjx1i/fj2qqjJ79uzATkNXHDx4kLfeegtFUdDr9XzrW9+SFmUrnM9zc7nUS2JqJfsOFjF16lQsFktXF0sI0Qv0xPSpN6OgAnpVVRXr1q3j9OnTTfbgfeutt657vqqqrFu3jtWrV2O1Wlm1ahUTJkygb9++gWNGjRrFhAkTUBSFwsJCXnjhBV588cVWvpybU32dj+zjDuKT9OSd209kZCSjR4/u6mIJITrA/x4q41x1033H22JgjIUHJyS2+PyCBQt49tlncblcmM3m66ZPvdIYayl96rW0R/rU119/naKiIlavXg3449TJkyd55plnuP/++7l48SIul4sHHniAe++99zq149fSeTt27GDNmjX4fD5iY2P55z//ic1mY/Xq1Zw4cQJFUXjkkUdYsGBBUPdpi6CzrZnNZn7xi1/w1FNP8fTTT7NhwwbGjh0b1E3y8vJISkoiMdH/hpkyZQoHDx5sFNCvbk26XC6ZgR0k1adxdJ8dcOFUz1BRUcG8efMkE5oQot30tPSpCxcuZNGiRYGAvmnTpsB2tH/4wx
+IiYnB4XCwYMEC5s+fH1Q3e3PnaZrWbBrUF198MZAy1WAwdNqOhUH91T979iyvvPIKFosFRVEYMGAADz/8MKtXryYzM/O651dVVWG1WgM/W63WRpv1X3HgwAHefPNNamtrWbVqVStexs3r0P5i8s6fwO4+j3rRR3p6OhkZGV1dLCFEB7lWS7oj9aT0qVarldTUVA4fPszAgQPJz88P7Nf+17/+lQ8//BDwbx1+7ty5oAJ6c+dVVlY2mwZ19+7dvPLKK4FzO2vOWVABXafTBdach4WFUVdXR0hICFVVVUHdpLnt4ptrgU+aNIlJkyaRnZ3NW2+9xZNPPtnkmO3bt7N9+3bA/ymuPbN+GQyGHpVFbON7Wzh8JAudzsD48eOYPHkyCQkJXV2sHleP3ZHUYdv1pjosKyvrsl63K/dduHAhTz/9NNnZ2bhcLsaOHUt2djb79u0LHPP6669z7Ngxnn76aQwGA0OHDiU7O5sxY8aQkJDAjh07eOWVV7DZbEG9nmXLlvGNb3yDlStXsnHjRmbPnk1ISAghISFMmjSJU6dOBbKxfdmSJUv44IMPSE9PZ/78+RiNRvbu3cuePXvYvHkzoaGhLF26FK/XG0iTqtfrmy1XS+fpdDp0Ol2z5xiNxsDjbfndmc3moN/HQd0lPT2do0ePMmnSJMaMGcMLL7yAyWRqsSK/zGq1UllZGfi5srLymgndhw8fzssvv0xdXV1g2dUVmZmZjXoF2rMroyclc6i4XM3hI58SGTaAZcvnEh7uH7LoDuXvSfXYXUkdtl1vqkOXy9UlG3ldnVjEbDZz2223sWLFChYvXozX62XRokWsXbuWzZs3B8bGGxoa0DQNr9fL9773PR588EFuueWWwDi6zWZDVdUWE5YUFBQwaNAgAD788EPS0tLwer306dOHXbt2sWTJEhwOB4cPH+aBBx5o8Trz5s3jxRdfJCUlhZ///Od4vV5qamqIjIzEZDKRk5PD4cOH8fl8eL1eNE0LfP9lLZ03duxYVq5cSUFBQaDLPSYmhunTp/Paa6/xq1/9KtDlfqOtdJfL1eR93FJylqCmqP/oRz8KpKn71re+xciRI+nXr19gTOJ60tLSuHTpEuXl5Xi9XrKysposqSotLQ205AsKCvB6vY0mR4jGtm3dg6Loycy8IxDMhRCiIy1ZsoTs7GwWL14MfJE+9f/+7/+47bbbuPvuu/njH//YbPrU6dOns3jxYnJzc5uscrrab3/7W2bNmkVmZiaffPIJv/rVrwB/7LHZbMyaNYv58+dfN31qdHQ0gwcPpqSkJDDf64477sDn85GZmcmzzz7LuHHjgnrdLZ1ntVoDaVAzMzN5+OGHAf/2sLW1tcyaNYuZM2eSlZUV1H3a6rrpU1VV5ZVXXuGhhx7CaDTe8I2OHDnC66+/jqqqzJw5k6985Sts3boV8M96fO+999i1axd6vR6TycQ3v/nNoJat3YzpU7NPFbH943cZ1H8cCxdP7eriNNFT6rE7kzpsu95Uh5I+tefqVulTdTpdYOp9W4wbN67Jp6Grly8sWbLkmp/ahJ/L5WPv3j0Y9CHMnjuxq4sjhBCimwhqDH3BggX885//5Ktf/aosh+pin3yUg8N1mVsnzyAkxNzVxRFCiBvSHdOnXpmJ/2VvvfVWj9hBLqjovGXLFmpqavjggw+aTFL785//3CEFE00VFzrIO3eAsNBoJkxsee2nEEJ0d90xfWpsbCzbtm1rt+t1tqACuiRh6Xr2Bh+7PzmO11fPvJkLg95yVwghxM0hqIB+rZmEouOpPo0De2upqjtBUlIygwYN7OoiCSGE6GaCCujX2q+9teMdovWyTzgpKj6FT3Uybdrtsi2uEEKIJoIK6FdvCgP+RfbZ2dlMmjSpQwolvlBa4iEvp5Y652cMHDiQPn36dHWRhBBCdENBBfTvf//7TR47duwYe/bsafcCiS/YbT6O7bfj9GXj83mYMmVKVxdJCHET6sx86NXV1Tz88MMUFR
XRr18//ud//ofo6OhW50Nfu3Zt0JufXe2xxx7ju9/9bo/MiXHDM6tGjx7dZMmBaF/HDzrw+GxU1p5m6NChjRLcCCFEZ7mSD/1qGzduZMmSJdx3333ce++9ZGVlsWXLFp555hkKCwsBAvnQX3zxRXbt2sW2bdv4yle+QlFRUYv3evnll5k6dSp79+5l6tSpvPzyywCN8qFv2bKFv//979e8zksvvdTs45qmoapqi+c999xzPTKYQ5At9LKyskY/u1wu9uzZ02sSH3RHleVeKsq8+Iyn0DSNyZMnd3WRhBDdwKkjdupqfO16zchoPSPHtbwTXWfmQ//Pf/7D22+/DcA999zDsmXL+PnPf96qfOi/+c1vcDqdzJkzhyFDhvD4449z7733MmXKFA4fPsxf//pX/vSnP3H8+HGcTicLFizgscceA/y9EU8++SRjxoxh8ODBPPDAA2zfvh2LxcL69euJj49v9p5bt25l7dq1uN1uYmJi+NOf/kR8fDw2m42VK1c2yY3eXB71tgoqoH+528JkMjFw4EB+8IMftLkAonlns50o+nqKSvz5hqOiorq6SEKIm1Rn5kOvqKggMdGfIjYxMTEwh6s1+dCfeOIJ1q9fH1hTXlRURH5+Ps8//zy//e1vAXj88ceJiYnB5/OxfPlysrOzm6zostvtjBs3jpUrV/LMM8/wxhtv8JOf/KTZe06aNIlNmzahKApvvvkmr7zyCk899RTPP/98IDc6+OegVVZWNptHva3aPMtdtL+qy15KL9bR4P0Ug8Egkw+FEAHXakl3pM7Mh96c1uRDb07fvn0ZP3584OdNmzbxxhtv4PP5KCsrIzc3t0lAN5lMzJkzB4BRo0axe/fuFq9/6dIlHn74YcrLy3G73YEc6bt27QoMG4A/aczWrVubzaPeVkGNoZ8/f75JkoOKigrOnz/fLoUQX9A0jay9pyip/Df1DRXMnDmzS5IyCCHE1e6880727NnDyZMncTqdjBo1iiFDhnDy5MnAMevWreOFF16gpqYGgIyMDE6dOgV8sQvbvffei81ma/E+cXFxgWHesrKywNyhd999lzvuuAOj0UhcXBwTJ07k+PHjQZf/6r+jFy5c4NVXX+Wtt95i+/btzJ49G6fT2eScK3nSAfR6/TWTrDz55JN8+9vf5qOPPuJ3v/tdYDtaTdOaXWrcEcuPgwroL730Ej5f4zEbr9fLn/70p3Yv0M3Mbrez8b0PKLiwi6ioGP7f//t/QWWcE0KIjhYWFsZtt93Go48+GkiktWTJEg4dOhTInAngcDgC33//+99n7dq15ObmNvt8c+bOncuGDRsA2LBhA/PmzQMgJSWFvXv3omkadrudI0eOkJ6e3uJ1jEYjHo+n2efq6+sJCQkhMjKSy5cvs2PHjuu8+uurq6sjKSkpUO4rZsyYwfr16wM/19TUMH78eD799FMuXLgA0G5d7kEF9KvHNK5ISkri8uXL7VIIAT6fj3feeYei4vPER49n+fJlREdHd3WxhBAioDPyof/gBz9g165d3H777ezatSswV6u1+dC/8Y1vkJmZyQ9/+MMmz40YMYKRI0cyc+ZMHn30USZObHvmyp/+9Kc89NBDLF26tFEil0cffTSQGz0zM5OsrKwW86i31XXzoQM88sgj/OhHP2LQoEGBxwoKCli7di0vvvhiuxTkRvWWfOiHDx9m7969JEbPZNzEwQweZumScrSH3pSHuqtIHbZdb6pDyYfec3WrfOjgn134+9//nkWLFpGYmEhZWRmbNm3iK1/5yg0XUnyhoaGBAwcOEB3Zj6iIVAamS1pUIYQQrRNUQM/MzCQsLIyPP/6YyspKrFYr9913X2AnINE2e/bswedTCTdOYNAQMwaj7NUuhOjdumM+9GD88Y9/bJKydeHChaxYsaJD7tcaQXW5d2c9vcu9uLiYd955h5SkWwgzjGH23ZEYe3hA701dnV1F6rDtelMdSpd7z9WZXe5BTYr761//ypkzZxo9du
bMGf72t7/dWAkF4J8It3PnTsLDIjCowxk0xNzjg7kQQoiuEVRA37t3L2lpaY0eGzRokCRnaaMTJ05QVVVFStIkTCYDAweburpIQggheqigArqiKE02s1dVlR7eW9+lnE4n+/fvJzk5FVd9MoMyzBhNN5wrRwghxE0uqAgydOhQ/vGPfwSCuqqq/POf/5RNT9rgyJEjuN1uEqLHYTQqDBwsM9uFEELcuKAC+re//W1OnjzJQw89xKpVq3jooYc4efIk999/f0eXr1ey2+0cP36cgQPSqa2MZMBgMyaztM6FEN3TsmXL2LlzZ6PHXnvtNVatWkVBQQH33XcfU6ZM4c4772TZsmXs27cvcNyOHTtYsGAB06dPZ86cOXzve9+jpKSkxXtVV1fzta99jdtvv52vfe1rgW1kPR4PK1asYPbs2cyYMaPF9Kg3avLkyVRVVbXrNTtbUFHEarXyu9/9jp/97GcsWrSIRx55hBEjRvDEE090dPl6pUOHDuH1eokJH4PeAGlDpHUuhOi+emI+9JtRUOvQwb/5SV5eHjt37qSwsJBhw4bxrW99qwOL1jvV19dz8uRJBqcPoboijLQh0joXQgRv165d7b7tdnx8PNOnT2/x+Z6WD/3111+nqKiI1atXA/6MoSdPnuSZZ57h/vvv5+LFi7hcLh544AHuvffeoOqopfOay2tus9lYvXp1kxzoHe2aAd3r9XLo0CF27tzJ8ePHSUpK4vbbb6eiooJHHnlEcnTfgIMHD6JpGtaoMZQ1wKAMaZ0LIbq3npYPfeHChSxatCgQ0Ddt2hTYX/4Pf/gDMTExOBwOFixYwPz58xvtvd6S5s7TNK3ZvOYvvvhiIAe6wWDotP0QrhnQv/Od76DT6ZgxYwZf/epXA3u5X51ZRwSvtraW7Oxshg0dweWLZvqmmrCESOtcCBG8a7WkO1JPyodutVpJTU3l8OHDDBw4kPz8/EAClr/+9a98+OGHgH9jsnPnzgUV0Js7r7Kystm85rt37+aVV14JnNtZibauGU369++PzWYjLy+P/Px8GhoaOqVQvdX+/ftRFIX42NH4fJA2VFrnQoieoaflQ1+0aBGbNm1i8+bN3HnnnSiKQlZWFrt372bTpk1s376dkSNHNtk2tjnXOq+5vOYt5UDvaNcM6L/85S956aWXGD16NJs2beK73/0ua9asweVyNcmPfj3Hjh1jxYoV/OhHP+K9995r8vzu3bt57LHHeOyxx1i9ejXnz59v1fW7u4qKCnJychg1ajSXLhhI6GMgIkrf1cUSQoig9LR86HfddRf/+c9/eO+991i0aBHgn8MUFRVFSEgIeXl5HDlyJKjX3tJ5LeU1by4Heme4bn9vfHw8y5YtY+3atfziF78gJiYGRVH42c9+xt///vegbqKqKuvWreOJJ57ghRdeYO/evRQXFzc6JiEhgV/+8pc899xz/Nd//Rd/+ctfbuwVdVNZWVmYTCaS4kbjdmmkDe256VGFEDennpQPPTo6msGDB1NSUsLYsWMBuOOOO/D5fGRmZvLss88ybty4oF53S+e1lNd8xYoVgRzoM2fOJCsrK6j7tNUNJWdxu90cOHCAXbt2BbV07ezZs2zYsIGf//zngL/rBGDp0qXNHt/Q0MBPf/pTXn311eteuyckZ7mSgGXKbVOoK8/AaFKYmhneJV0ynaE3JcXoKlKHbdeb6lCSs/Rc3S4f+peZTCamTp3K1KlTgzq+qqoqMA4C/k81V3fBfNnHH38c+ET1Zdu3b2f79u0ArFmzhri4uFaU/NoMBkO7Xg/8YynvvPMOkZGRpA+azO6CSu6Yl0R8fPPLLXqDjqjHm43UYdv1pjosKyvDYLihP9dt1lX37U3aUodmszno93Gn/Kaa6wRoqXV66tQpduzYwa9+9atmn8/MzCQzMzPwc3t+Au+IT/R5eXkUFxcze/ZsTh2tITRMR3ikg4oKZ7vepzvpTS2jriJ12Ha9qQ5dLhd6fefPuenIFnp3zI
d+ZSb+l7311ltBzYRvTlvr0OVyNXkft2sLvbWsVmtgLSFAZWVls+sHCwsLefXVV1m1ahURERGdUbQOpaoqWVlZxMTEkBg/mIJTdkaOC0HR9c6udiFEx+iNibB+85vftMt13n///Xa5DnwxE787ac3vvlMWnH4DkQAAHl9JREFUQaelpXHp0iXKy8vxer1kZWUxYcKERsdUVFTw3HPP8cMf/rDFTx89TXZ2NjU1NUyZMoXzuR6MJoV+AyVFqhCidXQ6nYxl34S8Xi86XfBhulNa6Hq9nvvvv59f//rXqKrKzJkz6devX2Cpw9y5c3n77bdpaGgIbEag1+tZs2ZNZxSvQ6iqyv79++nTpw9JCf3JPtxA+hAzBoO0zoUQrWOxWHA6nbhcrk6dTGs2m4Napy1adqN1qGkaOp0OiyX4FVE3NMu9O+mus9wLCwvZuHEjCxYswFmXzLlcF7MXRhIS2vt3hutNY5ddReqw7aQO207qsO06og5b6sXu/dGli5w9exaTyURKcn8uFLhI7me8KYK5EEKIriERpgN4vV7y8vJIT0+n5IIXrxcGSYpUIYQQHUgCegc4d+4cHo+HwYMzOHfWRWy8nuhYWcsphBCi40hA7wBnz54lNDQUg5KAw65JilQhhBAdTgJ6O3O5XJw/f56MjAzOnfUQGq4jKdnY1cUSQgjRy0lAb2f5+fn4fD76pqRTXeljQLpJNpIRQgjR4SSgt7MzZ84QFRWFyxYNCqSkykYyQgghOp4E9HZks9koLi4mIyODkgte4hMNWEKkioUQQnQ8iTbtKDc3F03TSIxPw2FTpXUuhBCi00hAb0dnz54lLi6OhppwdHro01cmwwkhhOgcEtDbSW1tLaWlpQwenMHFIg9JyUYMRpkMJ4QQonNIQG8nubm5AFijB+Jxa/QdIN3tQgghOo8E9HZy9uxZkpKSqL5swWhS+P/t3XtsHOW5+PHvzM7O+n7bje9OHDsBAjSQHKfQIKDU+UUqlzZCbUovqgJBahMQtFxEqCpUteVWmobCLygRgkCRWhWpJRIc2nIMKVDMaQNOmjQQYjuJ4xDHjr2+7Nq7O7Mz7/lj7Y3tOIkTG3ttno+08l7m8swz3nnmfWdnZk6xXBlOCCHE1JGCPgm6u7vp7Oykumohx4/ZlFZ40eXccyGEEFNICvokOHDgAADZGfNwHSifJ93tQgghppYU9AlSSnHgwAFKS0sJnjBJz9TJD3imOywhhBCfM1LQJ6irq4vu7m6q5i/kRHucsrleNE2624UQQkwtKegTdODAATRNw2fMBQUV8ut2IYQQ00B+ij0BSikaGxspLy/nxDEP+X6NrBzpbhdCCDH1pIU+AR0dHfT29lJeVk2oz5Vzz4UQQkwbKegT0NjYiK7reNxydB3K5sqlXoUQQkwP6XI/T0O/bq+omEtHm4fiMgOvKftHQgghpodUoPPU2tpKOBymuLAa21JUzJfudiGEENNHWujnac+ePaSnp6OscnxpMKdIUimEEGL6SAv9PPT19XHo0CEuvPBiOtsTN2LR5FKvQgghppEU9POwd+9eAPw5F6Lk3HMhhBApQPqJz1E8Hmffvn3Mnz+fznaT3HyN7Fw591wIIcT0mrKCvnv3brZt24brutTW1rJq1aoRn3/66ac888wzHDp0iFtuuYWvfe1rUxXaOWlsbCQajbKg6lIa97pcujR9ukMSQgghpqbL3XVdnnvuOX7yk5+wadMm3nvvPY4ePTpimKysLG699VZuuummqQjpvO3Zs4e8vDwcqxBNg9IKOfdcCCHE9JuSgt7U1ERxcTFFRUUYhsHy5cvZuXPniGFyc3NZsGABHk/qdl93dHTQ3t7O4sWLOdZqM6fYwJcmP0MQQggx/aakyz0YDOL3+5Ov/X4/jY2N5zWturo66urqAHjssccIBAKTEiOAYRhnnN67776LaZosrPovWj/p4otX+QkEsidt/rPF2fIozk5yOHGSw4mTHE7cVOZwSgq6UuqU98
73FqMrVqxgxYoVydednZ3nHddogUDgtNOLRqPs2bOHRYsW0fRJCI8BmTlROjtjkzb/2eJMeRTjIzmcOMnhxEkOJ+6zyGFpaemY709Jf7Hf76erqyv5uquri/z8/KmY9aQ5ePAgjuOw6KJFtB21KS7zYhhy7rkQQojUMCUFvbq6mra2Njo6OojH49TX11NTUzMVs540TU1NZGdng+vHthRl8+TccyGEEKljSrrcPR4Pt912Gw8//DCu63LddddRUVHBG2+8AcDKlSvp6elhw4YNRCIRNE3j9ddf5ze/+Q0ZGRlTEeIZxWIxjhw5wuLFi/n0iI3p0+RSr0IIIVLKlFWlpUuXsnTp0hHvrVy5Mvk8Ly+PLVu2TFU45+TQoUO4rsv8+QvY80+bufNNdLnUqxBCiBQi51yNQ3NzM5mZmSi7ANdButuFEEKkHCnoZ2FZFocPH6a6uppPj8TJyNTJ96fuufJCCCE+n6Sgn0VLSwuO4zBvbhWdHXHK5nnP+5Q7IYQQ4rMiBf0smpqaSE9Px0MhKCguk0u9CiGESD1S0M8gHo8nu9s72x1Mn0ZuvnS3CyGESD1S0M+gpaUF27aprqqm43icwmJDutuFEEKkJCnoZ9Dc3IzP5yM7sxjbUswplu52IYQQqUkK+mk4jsPBgwcT3e0diWvRzymWi8kIIYRITVLQT6O1tRXLsqiurqbjuE1uvkdulSqEECJlSYU6jYMHD+L1eikuKqOny6GwRFrnQgghUpcU9DEopTh48CCVlZV0d4FSyPFzIYQQKU0K+hiOHz/OwMAAVVVVnDgex/AiV4cTQgiR0qSgj6G5uRld15k3bx4njtsEirxyMxYhhBApTQr6KEopmpubKS8vx7a8RAYUhfLrdiGEEClOCvoowWCQ3t5eqqurOdFmA3L8XAghROqTgj5Kc3MzAFVVVXQcj5OVrZORKWkSQgiR2qRSjdLc3ExxcTFpvgy6TsSZUyKtcyGEEKlPCvowPT09nDhxgurqao5/auM6UCTnnwshhJgBpKAPs3//fiDR3d78SYzMbJ1AkRR0IYQQqU8K+jAfffQRBQUFOHY2vd0O1Rf65O5qQgghZgQp6IMikQgtLS1UV1dz8JMopk+jfJ453WEJIYQQ4yIFfVB7ezuaplFcVEn7sTiVC0w8hrTOhRBCzAxS0AdVVlbywAMP0NuZg65D5QLfdIckhBBCjJsU9GE0TD49YlNeacqtUoUQQswoUrWG+XhvL64DVRdK61wIIcTMIudkDeoeiLP33/1kBnTCmoPqt0lzbTojDm39Dm0DDl1Rh8tLs1lcnDnmr9+VbSXutWoYBKMue9sHyDI9LCzwkWMAcRtcBxyHgVic1j6bTK9GWbYXzeMBXQfTB17zlOm39saoa+4lOBBnbp5JZV4alfk+AhkGmqahlIJYBCwLZaZxNAr7OiJ0R+MsLsrkojnpGIM3mFFWDNUfojMY4nhfjNysdPx5mWTkZqObI3dm4q6iI2xzLGTRFrLojsS5vCSTLxRlnPEMAKUUkbhLv+WS4/PgM86876hcF6IRiAwkcpCVzbGQzb+Ohkj36lw9L4dMc+Qd7/oth3db+ghbLktKMpmf70OfwWclKKXAisFAP67pRbkumn5u+9xKKVAuaPpp149yHYjHx/w/+6wppcCJJ+avVOKBSnxo+tCM6bmQk7KtxP+e10zkxUi9TWMid04iZ66bWM+uO+y1A7onsQzG4DZlrGnE42BFIRZLrIshmgYeA9LSwedD0xPjK9eFWDSRH9saOUFNg7Q0MNMS60/XT87DthLTV25iFSsFuga+DDBH/u8px4GBMAz0J4bTtJMPjycR19DfuJ34ngw9hpbZNMHrA41EPlxnVH4G82V4wZeeWE7DOOU7kPgeWoM5ig6OYyQeHm8iDl0HTU/81U//XZtqmlJKTXcQE3Hs2LFJmc777zTT2ebnv50gbcoacxhdubiaTkWkg+u793Kt1UJarJ9YJEpX3EOHN5v/5F
Wzq+BCDmWXjRi3KNLFwr5WLI9BS2YJ7en+5Gd5VohLepq5tOcgpQMnMDUX02vg85nsz62kLmcR+81CPMqlAIsTWlpyXNO18Vsh/NEe/NFubN1gX14VvWY2AJpSKE0jIx5lcd9B5oaPczijkAPZc+nx5YyIMc2JkWv3Y3u8WLoXSzewtJEbNh2Fi0ZJvI/a7r1c0/YhfUYGjVnlHMgs42BGMT3eDMKeNBzt5AalwA5RYvdR4oTIcqKYjoUZj+GLxzBjEcxYP6Zj4VUOjdkV/O+cL9CaWTRsOeMsDzfx/07sAuVS57+c93IvwNJPFoA8J8IS6xiX2B2U2r0Ux0PkxvtRClq8BfzHV8w+XzEnjEyK4mGKnTAlbphMZdNq5NJi5NFi5NGtpTHPDnJB5DgLw0eZHzqGV1PgNUAf3LA4g0UxbuO6LrZuYBkmlm5iebyJDZTjgGOD4+DVwDR0fIaO6dHJdGNk2BE8rpPY8EQjMNBPTEHYyMDRPYkNRkYmZGRh+rxkexQerxe8XrBt6A9BuA/CocTGx1XESYyvdJ0sn4E3MxMysxMbnlAvTqiPsOUQ072ADj5f4mH6EhtLwwCPga5rZLkxfK6NNlR4HSexMXXiiedDGzSPJ7HhtW2wY4N/rcE6Pbh5GRp/eAEZi+FNbGgHN7bDN5rD48NjJKYVi5582NbgPAbn47qJ+Q/FYfogJ+/kQ9OhqwOCJyDUO+rLric2+lnZkJGV+OtLS8wnGkn8tayRxUbXE/O17ZM7LZqWeB8S8xs9vG2dnKYVS3xm+hLzGlrP0cjgPCNnzt1oup4odpCIA4blZRxME003UNGB8c/TMBLLfTYeA9IzEssZ6U8U8ungMcCjn9zhgMGdkHMsi4aR3JHC60V/9NnkDlEgEKCzs3NSwy4tLR3z/Skr6Lt372bbtm24rkttbS2rVq0a8blSim3btrFr1y58Ph/r16+nqqrqrNOdrILe33qEw+/sIkyMfm8mIU8aEY8Pv25T4olRosfIcG3e60/nv61CDmrZpLs2Hk0R1k6e3qajuMjTz1K9h8u1biKal0ayaXSzaHIy8GkulV6LeWacuWacXkfnPzEf/4mmEXTHbhWU2j2s6N7Ll9sbyLPCDGTkciSnjJbMYo6l+ek2s+n0ZBLU0lCaxsWefi6lh0viJ8iND7BH99NAAQ346cJHqR7jgjSbC7KgLFOnN2ITHIjTFVP02gqvFcW0EkU2LRqmMNpNSbSLkmiQNMfif0su538CS/jIVzwizhxlscDtpchjk+HGyCZOhhanxzVoU2m0kc5xPZN+zcTWTn9/eR3FIq2PL0WP8MXgx/Ri8j+5l/BueiURLVHA05XN1fZRVkSa8dshdhtFNPhK+LdZSthzspchw03snA3oiXVUEu+jJN5HuyeLdiOb+GAcmlKUxHuZawXJcyIcSpvDIa8f6wxxTpSmFJnYZCkbW/MQ0kyssxwFy3Atsp0I5lBrTNdRuk5MMwgrgwgj401TcbLcGB7lEvL4GNDO7VRMr3LIcmNkKDsR2VCracjwzYemAVqihTT0nJN/ThmGUa2aod6FZItq+KZpWGFWg881EkVyeGtu2PQ1XUe5auT0B3vIcJ3Ee8Nbfrp+stdgeBxDj8GeD3Tt5HyHehiGhzo6R6OXcfjwI2LXhy3fYAxow1qDY+V0+Gtt1PiD+RpBGyNfo9fByeXXNC0xheF5HmuZhvI11LpOzmdEsIPDj+pZ0IfttI3Ia3KEYet+dM60YTEP6+05JS/D8j0UrzuU49G0wXU8tE6GzXv4PJIhqlExwP//Xk1yarOuoLuuy913381Pf/pT/H4/Dz74IHfffTfl5eXJYRoaGvjrX//Kgw8+SGNjIy+88AKPPPLIWac9WQUdxp94pRT7T0TYcagPjw7+dC8FGQaBDIPqgrRTuobHQynF8bBN54CNFVdYjiLmuBRmelk0J31SunSUUsQcRdpZur/H62hfjH+1hglkerkwkEZhphdN08aVR1
cpbCcRj+W4WPHE8lqOoijLS17aqTs30bjL+0dCKOBLFdmke09dDsdN5LFt8BDBsZCF48LFhelcWpRBIMM7YtjOAZt+y6UsxzzlsEDcVRzujtHaG8M9w9dE0zS8uoZpaPg8Ol6PNqIsK8B21eB6dYk5in7LIWQ5hGIO4ZiLaWhkmR6yfR6yTJ383BzCoVBy/FhcJYcPxRxsd2Q8pq6R7Rsa34OmQdhyCMcS83FcEp8PzsNnaKM3syPzqEjEZjn0xRwG7HG26lKIzzSJWWP3tonxkRyenweuPtlDO5UFfUoOFDU1NVFcXExRUaILdfny5ezcuXNEQf/ggw+45ppr0DSNCy64gP7+frq7u8nPz5+KEM+JpmksKsxgUWHGpE6zJNukJPuzu5iNpmmkTeK59eU5PsovOb8fEOqahs/Q8BkA49sBSjN0rqvKPeMwHl2jLMekLOfsefToGkVZpx/O0DUW+NNY4E877TCflcRG4LPrHfg8+Cw2pJ83ksOZZUoKejAYxO8/eczY7/fT2Nh4yjCBQGDEMMFg8JSCXldXR11dHQCPPfbYiHEmyjCMSZ3e55XkceIkhxMnOZw4yeHETWUOp6Sgj9WrP+YvC88yDMCKFStYsWJF8vVk7j3K3ujkkDxOnORw4iSHEyc5nLip7HKfkvPQ/X4/XV1dydddXV2ntLz9fv+IhR5rGCGEEEKMbUoKenV1NW1tbXR0dBCPx6mvr6empmbEMDU1NbzzzjsopThw4AAZGRlS0IUQQohxmpIud4/Hw2233cbDDz+M67pcd911VFRU8MYbbwCwcuVKlixZQkNDA3fddRemabJ+/fqpCE0IIYSYFabsckhLly5l6dKlI95buXJl8rmmadx+++1TFY4QQggxq8i13IUQQohZQAq6EEIIMQvM+Gu5CyGEEEJa6CNs2LBhukOYFSSPEyc5nDjJ4cRJDiduKnMoBV0IIYSYBaSgCyGEELOA52c/+9nPpjuIVDKeW7aKs5M8TpzkcOIkhxMnOZy4qcqh/ChOCCGEmAWky10IIYSYBaSgCyGEELPAlF36NdXt3r2bbdu24boutbW1rFq1arpDSnmdnZ1s3ryZnp4eNE1jxYoVXH/99YTDYTZt2sSJEyeYM2cOP/7xj8nKyprucFOa67ps2LCBgoICNmzYIDk8R/39/WzZsoXW1lY0TWPdunWUlpZKDs/Ba6+9xltvvYWmaVRUVLB+/Xosy5IcnsUzzzxDQ0MDubm5bNy4EeCM399XXnmFt956C13XufXWW7n88ssnLxgllOM46s4771THjx9Xtm2r++67T7W2tk53WCkvGAyq5uZmpZRSAwMD6q677lKtra3qpZdeUq+88opSSqlXXnlFvfTSS9MZ5ozw6quvqieffFI9+uijSiklOTxHTz/9tKqrq1NKKWXbtgqHw5LDc9DV1aXWr1+vYrGYUkqpjRs3qh07dkgOx2Hfvn2qublZ3XPPPcn3Tpe31tZWdd999ynLslR7e7u68847leM4kxaLdLkDTU1NFBcXU1RUhGEYLF++nJ07d053WCkvPz8/+evN9PR0ysrKCAaD7Ny5k2uvvRaAa6+9VnJ5Fl1dXTQ0NFBbW5t8T3I4fgMDA3z88cd85StfAcAwDDIzMyWH58h1XSzLwnEcLMsiPz9fcjgOF1988Sm9FqfL286dO1m+fDler5fCwkKKi4tpamqatFikyx0IBoP4/f7ka7/fT2Nj4zRGNPN0dHRw6NAhFixYQG9vb/Je9vn5+fT19U1zdKnthRde4Hvf+x6RSCT5nuRw/Do6OsjJyeGZZ56hpaWFqqoq1qxZIzk8BwUFBdx0002sW7cO0zS57LLLuOyyyySH5+l0eQsGgyxcuDA5XEFBAcFgcNLmKy10QI1x5p6madMQycwUjUbZuHEja9asISMjY7rDmVE+/PBDcnNz5VzfCXAch0OHDrFy5Up+9atf4fP52L59+3SHNaOEw2F27tzJ5s2b2bp1K9FolH
feeWe6w5p1xqo1k0la6CRa5F1dXcnXXV1dyb0rcWbxeJyNGzdy9dVXc8UVVwCQm5tLd3c3+fn5dHd3k5OTM81Rpq5PPvmEDz74gF27dmFZFpFIhKeeekpyeA78fj9+vz/Z8rnyyivZvn275PAc7N27l8LCwmSOrrjiCg4cOCA5PE+ny9voWhMMBikoKJi0+UoLHaiurqatrY2Ojg7i8Tj19fXU1NRMd1gpTynFli1bKCsr48Ybb0y+X1NTw9tvvw3A22+/zbJly6YrxJT3ne98hy1btrB582Z+9KMfcemll3LXXXdJDs9BXl4efr+fY8eOAYniVF5eLjk8B4FAgMbGRmKxGEop9u7dS1lZmeTwPJ0ubzU1NdTX12PbNh0dHbS1tbFgwYJJm69cKW5QQ0MDL774Iq7rct1113HzzTdPd0gpb//+/Tz00EPMnTs3eYji29/+NgsXLmTTpk10dnYSCAS455575FSXcdi3bx+vvvoqGzZsIBQKSQ7PweHDh9myZQvxeJzCwkLWr1+PUkpyeA5efvll6uvr8Xg8VFZW8sMf/pBoNCo5PIsnn3ySjz76iFAoRG5uLqtXr2bZsmWnzduf//xnduzYga7rrFmzhiVLlkxaLFLQhRBCiFlAutyFEEKIWUAKuhBCCDELSEEXQgghZgEp6EIIIcQsIAVdCCGEmAWkoAvxObd69WqOHz8+3WGc4uWXX+app56a7jCEmDHkSnFCpJA77riDnp4edP3kvvaXv/xl1q5dO41RCSFmAinoQqSYBx54gMWLF093GLOK4zh4PJ7pDkOIz5QUdCFmiL///e+8+eabzJ8/n7fffpv8/HzWrl3LF77wBSBxXehnn32W/fv3k5WVxde//nVWrFgBJG6NuX37dnbs2EFvby8lJSXcf//9BAIBAPbs2cMjjzxCKBTiqquuYu3atWPeoOjll1/m6NGjmKbJv/71LwKBAHfccQfV1dVAovv+qaeeori4GIDNmzfj9/u55ZZb2LdvH08//TRf/epXefXVV9F1ndtvvx3DMHjxxRfp6+vjpptuGnGVRtu22bRpE7t27aKkpIR169ZRWVmZXN7nn3+ejz/+mLS0NG644Qauv/76ZJytra14vV4+/PBDvv/974+4Pa0Qs5EcQxdiBmlsbKSwsJDnnnuO1atX8+tf/5pwOAzAb3/7W/x+P1u3buXee+/lD3/4A3v37gXgtdde47333uPBBx/kxRdfZN26dfh8vuR0GxoaePTRR3niiSd4//33+fe//33aGD788EOWL1/OCy+8QE1NDc8///y44+/p6cG2bbZs2cLq1avZunUr7777Lo899hg///nP+dOf/kR7e3ty+A8++IAvfelLPP/881x11VU88cQTxONxXNfl8ccfp7Kykq1bt/LQQw/x+uuvs3v37hHjXnnllWzbto2rr7563DEKMVNJQRcixTzxxBOsWbMm+airq0t+lpubyw033IBhGCxfvpzS0lIaGhro7Oxk//79fPe738U0TSorK6mtrU3eAvPNN9/klltuobS0FE3TqKysJDs7OzndVatWkZmZSSAQ4JJLLuHw4cOnje+iiy5i6dKl6LrONddcc8ZhR/N4PNx8880YhsFVV11FKBTi+uuvJz09nYqKCsrLy2lpaUkOX1VVxZVXXolhGNx4443Ytk1jYyPNzc309fXxjW98A8MwKCoqora2lvr6+uS4F1xwAV/84hfRdR3TNMcdoxAzlXS5C5Fi7r///tMeQy8oKBjRFT5nzhyCwSDd3d1kZWWRnp6e/CwQCNDc3AwkbglcVFR02nnm5eUln/t8PqLR6GmHzc3NTT43TRPbtsd9jDo7Ozv5g7+hIjt6esPn7ff7k891Xcfv99Pd3Q1Ad3c3a9asSX7uui6LFi0ac1whPg+koAsxgwSDQZRSyaLe2dlJTU0N+fn5hMNhIpFIsqh3dnYm77Xs9/tpb29n7ty5n2l8Pp+PWCyWfN3T0zOhwjr83tGu69
LV1UV+fj4ej4fCwkI5rU2IYaTLXYgZpLe3l7/85S/E43Hef/99Pv30U5YsWUIgEODCCy/k97//PZZl0dLSwo4dO5LHjmtra/njH/9IW1sbSilaWloIhUKTHl9lZSX/+Mc/cF2X3bt389FHH01oegcPHuSf//wnjuPw+uuv4/V6WbhwIQsWLCA9PZ3t27djWRau63LkyBGampomaUmEmHmkhS5Einn88cdHnIe+ePFi7r//fgAWLlxIW1sba9euJS8vj3vuuSd5LPzuu+/m2Wef5Qc/+AFZWVl885vfTHbdDx1//uUvf0koFKKsrIz77rtv0mNfs2YNmzdv5m9/+xvLli1j2bJlE5peTU0N9fX1bN68meLiYu69914MI7HZeuCBB/jd737HHXfcQTwep7S0lG9961uTsRhCzEhyP3QhZoih09Z+8YtfTHcoQogUJF3uQgghxCwgBV0IIYSYBaTLXQghhJgFpIUuhBBCzAJS0IUQQohZQAq6EEIIMQtIQRdCCCFmASnoQgghxCzwfy1tLhWqJT+1AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plot_result_graphs('problem_model', result_dict, keys_to_plot=['VGG_38', 'VGG_08'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/res/fprop-bprop-block-diagram.pdf b/notebooks/res/fprop-bprop-block-diagram.pdf new file mode 100644 index 00000000..6c5f0e08 Binary files /dev/null and b/notebooks/res/fprop-bprop-block-diagram.pdf differ diff --git a/notebooks/res/fprop-bprop-block-diagram.png b/notebooks/res/fprop-bprop-block-diagram.png new file mode 100644 index 00000000..17f6a8b2 Binary files /dev/null and b/notebooks/res/fprop-bprop-block-diagram.png differ diff --git a/notebooks/res/fprop-bprop-block-diagram.tex b/notebooks/res/fprop-bprop-block-diagram.tex new file mode 100644 index 00000000..d2c2c7b7 --- /dev/null +++ b/notebooks/res/fprop-bprop-block-diagram.tex @@ -0,0 +1,65 @@ +\documentclass[tikz]{standalone} + +\usepackage{amsmath} +\usepackage{tikz} +\usetikzlibrary{arrows} +\usetikzlibrary{calc} +\usepackage{ifthen} + +\newcommand{\vct}[1]{\boldsymbol{#1}} +\newcommand{\pd}[2]{\frac{\partial #1}{\partial #2}} + +\tikzstyle{fprop} = [draw,fill=blue!20,minimum size=2em,align=center] +\tikzstyle{bprop} = [draw,fill=red!20,minimum size=2em,align=center] + +\begin{document} + +\begin{tikzpicture}[xscale=1.75] % + % define number of layers + \def\nl{2}; + % model input + \node at (0, 0) (input) {$\vct{x}$}; + % draw fprop through model layers + \foreach \l in {0,...,\nl} 
{ + \node[fprop] at (2 * \l + 1, 0) (fprop\l) {\texttt{layers[\l]} \\ \texttt{.fprop}}; + \ifthenelse{\l > 0}{ + \node at (2 * \l, 0) (hidden\l) {$\vct{h}_\l$}; + \draw[->] (hidden\l) -- (fprop\l); + \draw[->] let \n1={\l - 1} in (fprop\n1) -- (hidden\l); + }{ + \draw[->] (input) -- (fprop\l); + } + } + % model output + \node at (2 * \nl + 2, 0) (output) {$\mathbf{y}$}; + % error function + \node[fprop] at (2 * \nl + 3, 0) (errorfunc) {\texttt{error}}; + % error value + \node at (2 * \nl + 3, -1) (error) {$\bar{E}$}; + % targets + \node at (2 * \nl + 4, -1) (tgt) {$\vct{t}$}; + % error gradient + \node[bprop] at (2 * \nl + 3, -2) (errorgrad) {\texttt{error} \\ \texttt{.grad}}; + % gradient wrt outputs + \node at (2 * \nl + 2, -2) (gradoutput) {$\pd{\bar{E}}{\vct{y}}$}; + \draw[->] (fprop\nl) -- (output); + \draw[->] (output) -- (errorfunc); + \draw[->] (errorfunc) -- (error); + \draw[->] (error) -- (errorgrad); + \draw[->] (errorgrad) -- (gradoutput); + \draw[->] (tgt) |- (errorfunc); + \draw[->] (tgt) |- (errorgrad); + \foreach \l in {0,...,\nl} { + \node[bprop] at (2 * \l + 1, -2) (bprop\l) {\texttt{layers[\l]} \\ \texttt{.bprop}}; + \ifthenelse{\l > 0}{ + \node at (2 * \l, -2) (grad\l) {$\pd{\bar{E}}{\vct{h}_\l}$}; + \draw[<-] (grad\l) -- (bprop\l); + \draw[<-] let \n1={\l - 1} in (bprop\n1) -- (grad\l); + }{} + } + \node at (0, -2) (gradinput) {$\pd{\bar{E}}{\vct{x}}$}; + \draw[->] (bprop0) -- (gradinput); + \draw[->] (gradoutput) -- (bprop\nl); +\end{tikzpicture} + +\end{document} \ No newline at end of file diff --git a/notes/environment-set-up.md b/notes/environment-set-up.md index eff30e2f..fb9ed953 100644 --- a/notes/environment-set-up.md +++ b/notes/environment-set-up.md @@ -25,7 +25,7 @@ the School of Informatics [DICE desktop](http://computing.help.inf.ed.ac.uk/dice should be able to used on other Linux distributions such as Ubuntu and Linux Mint with minimal adjustments. 
For those wishing to install on a personal Windows or OSX machine, the initial instructions for setting up Conda will -differ slightly - you should instead select the relevant installer for your system from [here](https://docs.conda.io/en/latest/miniconda.html) and following the corresponding installation instructions from [here](https://conda.io/projects/conda/en/latest/user-guide/install/index.html). After Conda is installed the [remaining instructions](#creating-the-conda-environment) should be broadly the same across different systems. +differ slightly - you should instead select the relevant installer for your system from [here](http://conda.pydata.org/miniconda.html) and following the corresponding installation instructions from [here](http://conda.pydata.org/docs/install/quick.html). After Conda is installed the [remaining instructions](#creating-the-conda-environment) should be broadly the same across different systems. *Note: Although we are happy for you to additionally set up an environment on a personal machine, you should still set up a DICE environment now as this will make sure you are able to use shared computing resources later in the course. Also although we have tried to note when the required commands will differ on non-DICE systems, these instructions have only been tested on DICE and we will not be able to offer any support in labs on getting set up on a non-DICE system.* @@ -273,7 +273,7 @@ This will change the code in the working directory to the current state of the c You should make sure you are on the first lab branch now by running: ``` -git checkout mlp2022-23/lab1 +git checkout mlp2020-21/lab1 ``` ## 6. Installing the `mlp` Python package @@ -302,11 +302,10 @@ Note that after the first time a Python module is loaded into an interpreter ins import mlp ``` -Running the `import` statement any further times will have no effect even if the underlying module code has been changed. 
To reload an already imported module we instead need to use the [`importlib.reload`](https://docs.python.org/3/library/importlib.html#importlib.reload) function, e.g. +Running the `import` statement any further times will have no effect even if the underlying module code has been changed. To reload an already imported module we instead need to use the [`reload`](https://docs.python.org/2.7/library/functions.html#reload) function, e.g. ``` -import importlib -importlib.reload(mlp) +reload(mlp) ``` **Note: To be clear as this has caused some confusion in previous labs the above `import ...` / `reload(...)` statements should NOT be run directly in a bash terminal. They are examples Python statements - you could run them in a terminal by first loading a Python interpreter using:** @@ -371,7 +370,7 @@ Below are instructions for setting up the environment without additional explana --- -Start a new bash terminal. Download the latest 64-bit Python 3.9 Miniconda install script: +Start a new bash terminal. Download the latest 64-bit Python 2.7 Miniconda install script: ``` wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh @@ -433,7 +432,7 @@ Make sure we are on the first lab branch ``` cd ~/mlpractical -git checkout mlp2022-23/lab1 +git checkout mlp2020-21/lab1 ``` Install the `mlp` package in the environment in develop mode diff --git a/notes/getting-started-in-a-lab.md b/notes/getting-started-in-a-lab.md index 4c1a6ac1..e952e876 100644 --- a/notes/getting-started-in-a-lab.md +++ b/notes/getting-started-in-a-lab.md @@ -34,15 +34,15 @@ We are now ready to fetch any updated code from the remote repository on Github. git fetch origin ``` -This should display a message indicate a new branch has been found and fetched, named `origin/mlp2022-23/lab[n]` where `[n]` is the relevant lab number e.g. `origin/mlp2022-23/lab2` for the second lab. 
+This should display a message indicating a new branch has been found and fetched, named `origin/mlp2018-9/lab[n]` where `[n]` is the relevant lab number e.g. `origin/mlp2018-9/lab2` for the second lab. We now need to create and checkout a new local branch from the remote branch fetched above. This can be done by running ``` -git checkout -b lab[n] origin/mlp2022-23/lab[n] +git checkout -b lab[n] origin/mlp2018-9/lab[n] ``` -where again `lab[n]` corresponds to the relevant lab number fetched above e.g. `lab2`. This command creates a new local branch named `lab[n]` from the fetched branch on the remote repository `origin/mlp2022-23/lab[n]`. +where again `lab[n]` corresponds to the relevant lab number fetched above e.g. `lab2`. This command creates a new local branch named `lab[n]` from the fetched branch on the remote repository `origin/mlp2018-9/lab[n]`. Inside the `notebooks` directory there should new be a new notebook for today's lab. The notebook for the previous lab will now also have proposed solutions filled in. diff --git a/notes/google_cloud_setup.md b/notes/google_cloud_setup.md new file mode 100644 index 00000000..874595f0 --- /dev/null +++ b/notes/google_cloud_setup.md @@ -0,0 +1,114 @@ +# Google Cloud Usage Tutorial +This document has been created to help you set up a google cloud instance to be used for the MLP course using the student credit the course has acquired. +This document is non-exhaustive and much more useful information is available on the [google cloud documentation page](https://cloud.google.com/docs/). +For any question you might have, that is not covered here, a quick google search should get you what you need. Anything in the official google cloud docs should be very helpful. +
+| WARNING: Read those instructions carefully. 
You will be given $50 worth of credits and you will need to manage them properly | +| ---------------------------------------------------------------------------------------------------------------------------- | + + +### To create your account and start a project funded by the student credit +1. Login with your preferred gmail id to [google cloud console](https://cloud.google.com/), click on Select a Project on the left hand side of the search bar on top of the page and then click on New Project on the right hand side of the Pop-Up. 
Name your project sxxxxxxx-MLPractical - replacing the sxxxxxxx with your student number. Make sure you are on this project before following the next steps. +2. Get your coupon by following the instructions in the coupon retrieval link that you received. +3. Once you receive your coupon, follow the email instructions to add your coupon to your account. +4. Once you have added your coupon, join the [MLPractical GCP Google Group](https://groups.google.com/forum/#!forum/mlpractical_gcp) using the same Google account you used to redeem your coupon. This ensures access to the shared disk images. +5. Make sure that the financial source for your project is the MLPractical credit by clicking the 3 lines icon at the top left corner and then clicking billing -> go to linked billing account. +6. If it's not set to the MLPractical credits then set it by going to billing -> manage billing accounts -> My projects. Click the 3 dots under the Actions column for the relevant project and click change billing account. Select the MLPractical credit from your coupon. +7. Start the project + +### To create an instance +1. Click the button with the three lines at the top left corner. +2. Click ```Compute Engine```. You might be asked to activate it. +3. On the left hand side, select ```VM Instances```. +4. Click the ```CREATE INSTANCE``` button at the top of the window. +5. Name the instance ```mlpractical-1``` +6. 
Select region to be ```us-west1(Oregon)``` and zone to be ```us-west1-b``` (there are other suitable regions however this one has K80s available right now so we went with this one, feel free to find something else if for some reason you need to, but it is recommended to run on K80 GPUs.) +7. In Machine Configuration, select ```GPU``` machine family. +8. Select NVIDIA Tesla K80. Those are the cheapest ones, be careful as others can cost up to 8 times more to run +9. Series and in Machine type select ```2 vCPUs``` with ```7.5Gb memory```. +10. Under ```Boot disk```, click change. +11. On the new menu that appears (under public images), select the ```Deep Learning on Linux``` operating system, with the ```Pytorch 1.10, no-XLA``` version, then click select at the bottom. +12. You should consider going into the ```Advanced Options``` drop down menu at the bottom and enable ```Spot``` under ```VM provisioning model``` in the management tab. Using this option will be helpful if you're running low on credits. +13. Click ```Create```. Your instance should be ready in a minute or two. +14. If your instance failed to create due to the following error - ```Quota 'GPUS_ALL_REGIONS' exceeded. Limit: 0.0 globally.```, type ```quota``` in the search bar then click ```All quotas``` +15. Search for 'GPUS_ALL_REGIONS' in the filters +16. Tick in the box next to Global and then Click ```Edit Quotas``` in the top bar. +17. This will open a box in the right side corner asking for your details. Fill in those and then click Next. +18. Put your New Limit as ```1``` and in the description you can mention you need GPU for machine learning coursework. And then Send Request. +19. You will receive a confirmation email with your Quota Limit increased. This may take some minutes. +20. After the confirmation email, you can recheck the GPU(All Regions) Quota Limit being set to 1. This usually shows up in 10-15 minutes after the confirmation email. +21. 
Retry making the VM instance again as before and you should have your instance now. + + +#### Note +Be careful to select 1 x K80 GPU (P100s and P4s are 5x more expensive) + +You only have $50 dollars worth of credit, which should be about 125 hours of GPU usage on a K80. + + +### To login into your instance via terminal: +1. In a DICE terminal window ```conda activate mlp``` +2. Download the `gcloud` toolkit using ```curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-365.0.0-linux-x86_64.tar.gz``` +3. Install the `gcloud` toolkit using ```tar zxvf google-cloud-sdk-365.0.0-linux-x86_64.tar.gz; bash google-cloud-sdk/install.sh```. +**Note**: You will be asked to provide a passphrase to generate your local key, simply use a password of your choice. There might be some Yes/No style questions as well, choose yes, when that happens. + +4. Reset your terminal using ```reset; source ~/.bashrc```. Then authorize the current machine to access your nodes run ```gcloud auth login```. This will authenticate your google account login. +3. Follow the prompts to get a token for your current machine. +4. Run ```gcloud config set project PROJECT_ID``` where you replace `PROJECT-ID` with your project ID, you can find that in the projects drop down menu on the top of the Google Compute Engine window; this sets the current project as the active one +5. In your compute engine window, in the line for the instance that you have started (`mlpractical-1`), click on the downward arrow next to ```ssh```. Choose ```View gcloud command```. Copy the command to your terminal and press enter. +6. Add a password for your ssh-key (and remember it!). +7. Re-enter password (which will unlock your ssh-key) when prompted. +8. On your first login, you will be asked if you want to install nvidia drivers, agree and make sure the installation runs well. +9. Run ```nvidia-smi``` to confirm that the GPU can be found. This should report 1 Tesla K80 GPU. 
if not, the driver might have failed to install. Logout and retry. +10. Well done, you are now in your instance! When you login you may see an error of the form `Unable to set persistence mode for GPU 00000000:00:04.0: Insufficient Permissions` - you should be able to ignore this. The instance on the first startup should check for the gpu cuda drivers and since they are not there, it will install them. This will only happen once on your first login. Once the installation is finished you are ready to use the instance for your coursework. +11. Clone a fresh mlpractical repository, and checkout branch `coursework2`: + +``` +git clone https://github.com/VICO-UoE/mlpractical.git ~/mlpractical +cd ~/mlpractical +git checkout -b coursework2 origin/mlp2021-22/coursework2 +python setup.py develop +``` + +Then, to test PyTorch running on the GPU, run this script that trains a small convolutional network (7 conv layers + 1 linear layer, 32 filters) on CIFAR100: + +``` +python pytorch_mlp_framework/train_evaluate_image_classification_system.py --batch_size 100 --seed 0 --num_filters 32 --num_stages 3 --num_blocks_per_stage 0 --experiment_name VGG_08_experiment --use_gpu True --num_classes 100 --block_type 'conv_block' --continue_from_epoch -1 +``` + +You should be able to see an experiment running, using the GPU. It should be doing about 26-30 it/s (iterations per second). You can stop it whenever you like using `ctrl-c`. + +If all the above matches what’s stated then you should be ready to run your coursework jobs. + +### Remember to ```stop``` your instance when not using it. You pay for the time you use the machine, not for the computational cycles used. +To stop the instance go to `Compute Engine -> VM instances` on the Google Cloud Platform, select the instance and click ```Stop```. + +#### Future ssh access: +To access the instance in the future simply run the `gcloud` command you copied from the google compute engine instance page. 
+ + +## Copying data to and from an instance + +Please look at the [google docs page on copying data](https://cloud.google.com/filestore/docs/copying-data). + +To copy from local machine to a google instance, have a look at this [stackoverflow post](https://stackoverflow.com/questions/27857532/rsync-to-google-compute-engine-instance-from-jenkins). + +## Running experiments over ssh: + +If ssh fails while running an experiment, then the experiment is normally killed. +To avoid this use the command ```screen```. It creates a process of the current session that keeps running whether + a user is signed in or not. + +The basics of using screen is to use ```screen``` to create a new session, then to enter an existing session you use: +```screen -ls``` +To get a list of all available sessions. Then once you find the one you want use: +```screen -d -r screen_id``` +Replacing screen_id with the id of the session you want to enter. + +While in a session, you can use +- ```ctrl+a+esc``` To pause process and be able to scroll +- ```ctrl+a+d``` to detach from session while leaving it running (once you detach you can reattach using ```screen -r```) +- ```ctrl+a+n``` to see the next session. +- ```ctrl+a+c``` to create a new session + diff --git a/notes/pytorch-experiment-framework.md b/notes/pytorch-experiment-framework.md new file mode 100644 index 00000000..b3e5b782 --- /dev/null +++ b/notes/pytorch-experiment-framework.md @@ -0,0 +1,134 @@ +# Pytorch Experiment Framework + +## What does this framework do? +The Pytorch experiment framework located in ```mlp/pytorch_mlp_framework``` includes tooling for building an array of deep neural networks, +including fully connected and convolutional networks. 
In addition, it also includes tooling for experiment running, +metric handling and storage, model weight storage, checkpointing (allowing continuation from previous saved point), as +well as taking care of keeping track of the best validation model which is then used at the end to produce test set evaluation metrics. + +## Why do we need it? +It serves two main purposes. The first, is to allow you an easy, worry-free transition into using Pytorch for experiments + in your coursework. The second, is to teach you good coding practices for building and running deep learning experiments + using Pytorch. The framework comes fully loaded with tooling that can keep track of relevant metrics, save models, resume from previous saved states and + even automatically choose the best validation model for test set evaluation. We include documentation and comments in almost + every single line of code in the framework, to help you maximize your learning. The code style itself, can be used for + learning good programming practices in structuring your code in a modular, readable and computationally efficient manner that minimizes chances of user-error. + +## Installation + +First thing you have to do is activate your conda MLP environment. + +### GPU version on Google Compute Engine +For usage on google cloud, the disk image we provide comes pre-loaded with all the packages you need to run the Pytorch +experiment framework, including Pytorch itself. Thus when you created an instance and set up your environment, everything you need for this framework was installed, thus removing the need for you to install Pytorch. + + + +### CPU version on DICE (or other local machine) + +If you do not have your MLP conda environment installed on your current machine +please follow the instructions in notes/environment-set-up.md. 
Once your mlp conda environment is activated, please go to +[Pytorch's installation page](https://pytorch.org/get-started/locally/) and take some time to choose the right Pytorch version for your setup (taking care to choose CPU/GPU version depending on what hardware you have available). + +For example, on DICE you can install the CPU version using the command: +``` +conda install pytorch-cpu torchvision-cpu -c pytorch +``` + +Once Pytorch is installed in your mlp conda environment, you can start using the framework. The framework has been built +to allow you to control your experiment hyperparameters directly from the command line, by using command line argument parsing. + +## Using the framework + +You can get a list of all available hyperparameters and arguments by using: +``` +python pytorch_mlp_framework/train_evaluate_image_classification_system.py -h +``` + +The -h at the end is short for --help, which presents a list with all possible arguments next to a description of what they modify in the setup. +Once you execute that command, you should be able to see the following list: + +``` +Welcome to the MLP course's Pytorch training and inference helper script + +optional arguments: + -h, --help show this help message and exit + --batch_size [BATCH_SIZE] + Batch_size for experiment + --continue_from_epoch [CONTINUE_FROM_EPOCH] + Which epoch to continue from. + If -2, continues from where it left off + If -1, starts from scratch + if >=0, continues from given epoch + --seed [SEED] Seed to use for random number generator for experiment + --image_num_channels [IMAGE_NUM_CHANNELS] + The channel dimensionality of our image-data + --image_height [IMAGE_HEIGHT] + Height of image data + --image_width [IMAGE_WIDTH] + Width of image data + --num_stages [NUM_STAGES] + Number of convolutional stages in the network. 
A stage + is considered a sequence of convolutional layers where + the input volume remains the same in the spacial + dimension and is always terminated by a dimensionality + reduction stage + --num_blocks_per_stage [NUM_BLOCKS_PER_STAGE] + Number of convolutional blocks in each stage, not + including the reduction stage. A convolutional block + is made up of two convolutional layers activated using + the leaky-relu non-linearity + --num_filters [NUM_FILTERS] + Number of convolutional filters per convolutional + layer in the network (excluding dimensionality + reduction layers) + --num_epochs [NUM_EPOCHS] + The experiment's epoch budget + --num_classes [NUM_CLASSES] + Number of classes in the dataset + --experiment_name [EXPERIMENT_NAME] + Experiment name - to be used for building the + experiment folder + --use_gpu [USE_GPU] A flag indicating whether we will use GPU acceleration + or not + --weight_decay_coefficient [WEIGHT_DECAY_COEFFICIENT] + Weight decay to use for Adam + --block_type BLOCK_TYPE + Type of convolutional blocks to use in our network + (This argument will be useful in running experiments + to debug your network) + +``` + +For example, to run a simple experiment using a 7-layer convolutional network on the CPU you can run: + +``` +python pytorch_mlp_framework/train_evaluate_image_classification_system.py --batch_size 100 --seed 0 --num_filters 32 --num_stages 3 --num_blocks_per_stage 0 --experiment_name VGG_07 --num_classes 100 --block_type 'conv_block' --weight_decay_coefficient 0.00000 --use_gpu False +``` + +Your experiment should begin running. + +Your experiment's statistics and model weights are saved in the directory VGG_07/ under VGG_07/result_outputs and +VGG_07/saved_models. 
+ + +To run on a GPU on Google Compute Engine the command would be: +``` +python pytorch_mlp_framework/train_evaluate_image_classification_system.py --batch_size 100 --seed 0 --num_filters 32 --num_stages 3 --num_blocks_per_stage 0 --experiment_name VGG_07 --num_classes 100 --block_type 'conv_block' --weight_decay_coefficient 0.00000 --use_gpu True + +``` + +We have also provided the exact scripts we used to run the experiments of VGG08 and VGG38 as shown in the coursework spec inside the files: +- run_vgg_08_default.sh +- run_vgg_38_default.sh + +**However, remember, if you want to reuse those scripts for your own investigations, change the experiment name and seed. +If you do not change the name, the old folders will be overwritten.** + +## So, where can I ask more questions and find more information on Pytorch and what it can do? + +First course of action should be to search the web and then to refer to the Pytorch [documentation](https://pytorch.org/docs/stable/index.html), + [tutorials](https://pytorch.org/tutorials/) and [github](https://github.com/pytorch/pytorch) sites. + + If you still can't get an answer to your question then as always, post on Piazza and/or come to the lab sessions. + diff --git a/notes/quota-issue.md b/notes/quota-issue.md index cd1cb7c7..db09687a 100644 --- a/notes/quota-issue.md +++ b/notes/quota-issue.md @@ -17,13 +17,13 @@ this should clean out the old partially installed packages and reinstall them fr Your homespace can be accessed from any Informatics computer running DICE (e.g. any of the computers in the [Forrest Hill labs](http://web.inf.ed.ac.uk/infweb/student-services/ito/students/year2/student-support/facilities/computer-labs) which are open-access outside of booked lab sessions or for those who know how to use SSH you can [log in remotely](http://computing.help.inf.ed.ac.uk/external-login)). 
You can therefore finish your environment set up prior to the next lab if you want though it is also fine to wait till the beginning of the next lab (it will take around 5 minutes to complete the installation). -At this point assuming you ran through the rest of the instructions to clone the Git repository to your homespace and install the `mlp` package (i.e. the instructions from [here](https://github.com/VICO-UoE/mlpractical/tree/mlp2016-7/lab1/notes/environment-set-up.md#getting-the-course-code-and-a-short-introduction-to-git) on-wards), you should have a fully working environment. +At this point assuming you ran through the rest of the instructions to clone the Git repository to your homespace and install the `mlp` package (i.e. the instructions from [here](https://github.com/CSTR-Edinburgh/mlpractical/blob/mlp2016-7/lab1/environment-set-up.md#getting-the-course-code-and-a-short-introduction-to-git) on-wards), you should have a fully working environment. Once your environment is set up in all future labs you will only need to activate it to get started. So at the beginning of each subsequent lab we will ask you to do something like the following ``` source activate mlp # Activate the mlp environment cd ~/mlpractical # Change the current directory to mlpractical repository -git checkout mlp2022-23/lab[...] # Checkout the branch for this week's lab +git checkout mlp2017-8/lab[...] 
# Checkout the branch for this week's lab jupyter notebook # Launch the notebook server ``` diff --git a/pytorch_mlp_framework/arg_extractor.py b/pytorch_mlp_framework/arg_extractor.py new file mode 100644 index 00000000..039f2554 --- /dev/null +++ b/pytorch_mlp_framework/arg_extractor.py @@ -0,0 +1,53 @@ +import argparse + + +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def get_args(): + """ + Returns a namedtuple with arguments extracted from the command line. + :return: A namedtuple with arguments + """ + parser = argparse.ArgumentParser( + description='Welcome to the MLP course\'s Pytorch training and inference helper script') + + parser.add_argument('--batch_size', nargs="?", type=int, default=100, help='Batch_size for experiment') + parser.add_argument('--continue_from_epoch', nargs="?", type=int, default=-1, help='Epoch you want to continue training from while restarting an experiment') + parser.add_argument('--seed', nargs="?", type=int, default=7112018, + help='Seed to use for random number generator for experiment') + parser.add_argument('--image_num_channels', nargs="?", type=int, default=3, + help='The channel dimensionality of our image-data') + parser.add_argument('--image_height', nargs="?", type=int, default=32, help='Height of image data') + parser.add_argument('--image_width', nargs="?", type=int, default=32, help='Width of image data') + parser.add_argument('--num_stages', nargs="?", type=int, default=3, + help='Number of convolutional stages in the network. 
A stage is considered a sequence of ' + 'convolutional layers where the input volume remains the same in the spacial dimension and' + ' is always terminated by a dimensionality reduction stage') + parser.add_argument('--num_blocks_per_stage', nargs="?", type=int, default=5, + help='Number of convolutional blocks in each stage, not including the reduction stage.' + ' A convolutional block is made up of two convolutional layers activated using the ' + ' leaky-relu non-linearity') + parser.add_argument('--num_filters', nargs="?", type=int, default=16, + help='Number of convolutional filters per convolutional layer in the network (excluding ' + 'dimensionality reduction layers)') + parser.add_argument('--num_epochs', nargs="?", type=int, default=100, help='Total number of epochs for model training') + parser.add_argument('--num_classes', nargs="?", type=int, default=100, help='Number of classes in the dataset') + parser.add_argument('--experiment_name', nargs="?", type=str, default="exp_1", + help='Experiment name - to be used for building the experiment folder') + parser.add_argument('--use_gpu', nargs="?", type=str2bool, default=True, + help='A flag indicating whether we will use GPU acceleration or not') + parser.add_argument('--weight_decay_coefficient', nargs="?", type=float, default=0, + help='Weight decay to use for Adam') + parser.add_argument('--block_type', type=str, default='conv_block', + help='Type of convolutional blocks to use in our network ' + '(This argument will be useful in running experiments to debug your network)') + args = parser.parse_args() + print(args) + return args diff --git a/pytorch_mlp_framework/experiment_builder.py b/pytorch_mlp_framework/experiment_builder.py new file mode 100644 index 00000000..c0b5380c --- /dev/null +++ b/pytorch_mlp_framework/experiment_builder.py @@ -0,0 +1,326 @@ +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import tqdm +import os +import numpy as np +import 
time + +from pytorch_mlp_framework.storage_utils import save_statistics +from matplotlib import pyplot as plt +import matplotlib +matplotlib.rcParams.update({'font.size': 8}) + +class ExperimentBuilder(nn.Module): + def __init__(self, network_model, experiment_name, num_epochs, train_data, val_data, + test_data, weight_decay_coefficient, use_gpu, continue_from_epoch=-1): + """ + Initializes an ExperimentBuilder object. Such an object takes care of running training and evaluation of a deep net + on a given dataset. It also takes care of saving per epoch models and automatically inferring the best val model + to be used for evaluating the test set metrics. + :param network_model: A pytorch nn.Module which implements a network architecture. + :param experiment_name: The name of the experiment. This is used mainly for keeping track of the experiment and creating and directory structure that will be used to save logs, model parameters and other. + :param num_epochs: Total number of epochs to run the experiment + :param train_data: An object of the DataProvider type. Contains the training set. + :param val_data: An object of the DataProvider type. Contains the val set. + :param test_data: An object of the DataProvider type. Contains the test set. + :param weight_decay_coefficient: A float indicating the weight decay to use with the adam optimizer. + :param use_gpu: A boolean indicating whether to use a GPU or not. + :param continue_from_epoch: An int indicating whether we'll start from scrach (-1) or whether we'll reload a previously saved model of epoch 'continue_from_epoch' and continue training from there. 
+ """ + super(ExperimentBuilder, self).__init__() + + + self.experiment_name = experiment_name + self.model = network_model + + if torch.cuda.device_count() > 1 and use_gpu: + self.device = torch.cuda.current_device() + self.model.to(self.device) + self.model = nn.DataParallel(module=self.model) + print('Use Multi GPU', self.device) + elif torch.cuda.device_count() == 1 and use_gpu: + self.device = torch.cuda.current_device() + self.model.to(self.device) # sends the model from the cpu to the gpu + print('Use GPU', self.device) + else: + print("use CPU") + self.device = torch.device('cpu') # sets the device to be CPU + print(self.device) + + print('here') + + self.model.reset_parameters() # re-initialize network parameters + self.train_data = train_data + self.val_data = val_data + self.test_data = test_data + + print('System learnable parameters') + num_conv_layers = 0 + num_linear_layers = 0 + total_num_parameters = 0 + for name, value in self.named_parameters(): + print(name, value.shape) + if all(item in name for item in ['conv', 'weight']): + num_conv_layers += 1 + if all(item in name for item in ['linear', 'weight']): + num_linear_layers += 1 + total_num_parameters += np.prod(value.shape) + + print('Total number of parameters', total_num_parameters) + print('Total number of conv layers', num_conv_layers) + print('Total number of linear layers', num_linear_layers) + + self.optimizer = optim.Adam(self.parameters(), amsgrad=False, + weight_decay=weight_decay_coefficient) + self.learning_rate_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, + T_max=num_epochs, + eta_min=0.00002) + # Generate the directory names + self.experiment_folder = os.path.abspath(experiment_name) + self.experiment_logs = os.path.abspath(os.path.join(self.experiment_folder, "result_outputs")) + self.experiment_saved_models = os.path.abspath(os.path.join(self.experiment_folder, "saved_models")) + + # Set best models to be at 0 since we are just starting + 
self.best_val_model_idx = 0 + self.best_val_model_acc = 0. + + if not os.path.exists(self.experiment_folder): # If experiment directory does not exist + os.mkdir(self.experiment_folder) # create the experiment directory + os.mkdir(self.experiment_logs) # create the experiment log directory + os.mkdir(self.experiment_saved_models) # create the experiment saved models directory + + self.num_epochs = num_epochs + self.criterion = nn.CrossEntropyLoss().to(self.device) # send the loss computation to the GPU + + if continue_from_epoch == -2: # if continue from epoch is -2 then continue from latest saved model + self.state, self.best_val_model_idx, self.best_val_model_acc = self.load_model( + model_save_dir=self.experiment_saved_models, model_save_name="train_model", + model_idx='latest') # reload existing model from epoch and return best val model index + # and the best val acc of that model + self.starting_epoch = int(self.state['model_epoch']) + + elif continue_from_epoch > -1: # if continue from epoch is greater than -1 then + self.state, self.best_val_model_idx, self.best_val_model_acc = self.load_model( + model_save_dir=self.experiment_saved_models, model_save_name="train_model", + model_idx=continue_from_epoch) # reload existing model from epoch and return best val model index + # and the best val acc of that model + self.starting_epoch = continue_from_epoch + else: + self.state = dict() + self.starting_epoch = 0 + + def get_num_parameters(self): + total_num_params = 0 + for param in self.parameters(): + total_num_params += np.prod(param.shape) + + return total_num_params + + + def plot_func_def(self,all_grads, layers): + + + """ + Plot function definition to plot the average gradient with respect to the number of layers in the given model + :param all_grads: Gradients wrt weights for each layer in the model. 
+ :param layers: Layer names corresponding to the model parameters + :return: plot for gradient flow + """ + plt.plot(all_grads, alpha=0.3, color="b") + plt.hlines(0, 0, len(all_grads)+1, linewidth=1, color="k" ) + plt.xticks(range(0,len(all_grads), 1), layers, rotation="vertical") + plt.xlim(xmin=0, xmax=len(all_grads)) + plt.xlabel("Layers") + plt.ylabel("Average Gradient") + plt.title("Gradient flow") + plt.grid(True) + plt.tight_layout() + + return plt + + + def plot_grad_flow(self, named_parameters): + """ + The function is being called in Line 298 of this file. + Receives the parameters of the model being trained. Returns plot of gradient flow for the given model parameters. + + """ + all_grads = [] + layers = [] + + """ + Complete the code in the block below to collect absolute mean of the gradients for each layer in all_grads with the layer names in layers. + """ + ######################################## + + + ######################################## + + + plt = self.plot_func_def(all_grads, layers) + + return plt + + + + + def run_train_iter(self, x, y): + + self.train() # sets model to training mode (in case batch normalization or other methods have different procedures for training and evaluation) + x, y = x.float().to(device=self.device), y.long().to( + device=self.device) # send data to device as torch tensors + out = self.model.forward(x) # forward the data in the model + + + loss = F.cross_entropy(input=out, target=y) # compute loss + + self.optimizer.zero_grad() # set all weight grads from previous training iters to 0 + loss.backward() # backpropagate to compute gradients for current iter loss + + self.learning_rate_scheduler.step(epoch=self.current_epoch) + self.optimizer.step() # update network parameters + _, predicted = torch.max(out.data, 1) # get argmax of predictions + accuracy = np.mean(list(predicted.eq(y.data).cpu())) # compute accuracy + return loss.cpu().data.numpy(), accuracy + + def run_evaluation_iter(self, x, y): + """ + Receives 
the inputs and targets for the model and runs an evaluation iterations. Returns loss and accuracy metrics. + :param x: The inputs to the model. A numpy array of shape batch_size, channels, height, width + :param y: The targets for the model. A numpy array of shape batch_size, num_classes + :return: the loss and accuracy for this batch + """ + self.eval() # sets the system to validation mode + x, y = x.float().to(device=self.device), y.long().to( + device=self.device) # convert data to pytorch tensors and send to the computation device + out = self.model.forward(x) # forward the data in the model + + loss = F.cross_entropy(input=out, target=y) # compute loss + + _, predicted = torch.max(out.data, 1) # get argmax of predictions + accuracy = np.mean(list(predicted.eq(y.data).cpu())) # compute accuracy + return loss.cpu().data.numpy(), accuracy + + def save_model(self, model_save_dir, model_save_name, model_idx, best_validation_model_idx, + best_validation_model_acc): + """ + Save the network parameter state and current best val epoch idx and best val accuracy. + :param model_save_name: Name to use to save model without the epoch index + :param model_idx: The index to save the model with. + :param best_validation_model_idx: The index of the best validation model to be stored for future use. + :param best_validation_model_acc: The best validation accuracy to be stored for use at test time. + :param model_save_dir: The directory to store the state at. + :param state: The dictionary containing the system state. + + """ + self.state['network'] = self.state_dict() # save network parameter and other variables. 
+ self.state['best_val_model_idx'] = best_validation_model_idx # save current best val idx + self.state['best_val_model_acc'] = best_validation_model_acc # save current best val acc + torch.save(self.state, f=os.path.join(model_save_dir, "{}_{}".format(model_save_name, str( + model_idx)))) # save state at prespecified filepath + + def load_model(self, model_save_dir, model_save_name, model_idx): + """ + Load the network parameter state and the best val model idx and best val acc to be compared with the future val accuracies, in order to choose the best val model + :param model_save_dir: The directory to store the state at. + :param model_save_name: Name to use to save model without the epoch index + :param model_idx: The index to save the model with. + :return: best val idx and best val model acc, also it loads the network state into the system state without returning it + """ + state = torch.load(f=os.path.join(model_save_dir, "{}_{}".format(model_save_name, str(model_idx)))) + self.load_state_dict(state_dict=state['network']) + return state, state['best_val_model_idx'], state['best_val_model_acc'] + + def run_experiment(self): + """ + Runs experiment train and evaluation iterations, saving the model and best val model and val model accuracy after each epoch + :return: The summary current_epoch_losses from starting epoch to total_epochs. 
+ """ + total_losses = {"train_acc": [], "train_loss": [], "val_acc": [], + "val_loss": []} # initialize a dict to keep the per-epoch metrics + for i, epoch_idx in enumerate(range(self.starting_epoch, self.num_epochs)): + epoch_start_time = time.time() + current_epoch_losses = {"train_acc": [], "train_loss": [], "val_acc": [], "val_loss": []} + self.current_epoch = epoch_idx + with tqdm.tqdm(total=len(self.train_data)) as pbar_train: # create a progress bar for training + for idx, (x, y) in enumerate(self.train_data): # get data batches + loss, accuracy = self.run_train_iter(x=x, y=y) # take a training iter step + current_epoch_losses["train_loss"].append(loss) # add current iter loss to the train loss list + current_epoch_losses["train_acc"].append(accuracy) # add current iter acc to the train acc list + pbar_train.update(1) + pbar_train.set_description("loss: {:.4f}, accuracy: {:.4f}".format(loss, accuracy)) + + with tqdm.tqdm(total=len(self.val_data)) as pbar_val: # create a progress bar for validation + for x, y in self.val_data: # get data batches + loss, accuracy = self.run_evaluation_iter(x=x, y=y) # run a validation iter + current_epoch_losses["val_loss"].append(loss) # add current iter loss to val loss list. + current_epoch_losses["val_acc"].append(accuracy) # add current iter acc to val acc lst. 
+ pbar_val.update(1) # add 1 step to the progress bar + pbar_val.set_description("loss: {:.4f}, accuracy: {:.4f}".format(loss, accuracy)) + val_mean_accuracy = np.mean(current_epoch_losses['val_acc']) + if val_mean_accuracy > self.best_val_model_acc: # if current epoch's mean val acc is greater than the saved best val acc then + self.best_val_model_acc = val_mean_accuracy # set the best val model acc to be current epoch's val accuracy + self.best_val_model_idx = epoch_idx # set the experiment-wise best val idx to be the current epoch's idx + + for key, value in current_epoch_losses.items(): + total_losses[key].append(np.mean( + value)) # get mean of all metrics of current epoch metrics dict, to get them ready for storage and output on the terminal. + + save_statistics(experiment_log_dir=self.experiment_logs, filename='summary.csv', + stats_dict=total_losses, current_epoch=i, + continue_from_mode=True if (self.starting_epoch != 0 or i > 0) else False) # save statistics to stats file. + + # load_statistics(experiment_log_dir=self.experiment_logs, filename='summary.csv') # How to load a csv file if you need to + + out_string = "_".join( + ["{}_{:.4f}".format(key, np.mean(value)) for key, value in current_epoch_losses.items()]) + # create a string to use to report our epoch metrics + epoch_elapsed_time = time.time() - epoch_start_time # calculate time taken for epoch + epoch_elapsed_time = "{:.4f}".format(epoch_elapsed_time) + print("Epoch {}:".format(epoch_idx), out_string, "epoch time", epoch_elapsed_time, "seconds") + self.state['model_epoch'] = epoch_idx + self.save_model(model_save_dir=self.experiment_saved_models, + # save model and best val idx and best val acc, using the model dir, model name and model idx + model_save_name="train_model", model_idx=epoch_idx, + best_validation_model_idx=self.best_val_model_idx, + best_validation_model_acc=self.best_val_model_acc) + self.save_model(model_save_dir=self.experiment_saved_models, + # save model and best val idx and 
best val acc, using the model dir, model name and model idx + model_save_name="train_model", model_idx='latest', + best_validation_model_idx=self.best_val_model_idx, + best_validation_model_acc=self.best_val_model_acc) + + ################################################################ + ##### Plot Gradient Flow at each Epoch during Training ###### + print("Generating Gradient Flow Plot at epoch {}".format(epoch_idx)) + plt = self.plot_grad_flow(self.model.named_parameters()) + if not os.path.exists(os.path.join(self.experiment_saved_models, 'gradient_flow_plots')): + os.mkdir(os.path.join(self.experiment_saved_models, 'gradient_flow_plots')) + # plt.legend(loc="best") + plt.savefig(os.path.join(self.experiment_saved_models, 'gradient_flow_plots', "epoch{}.pdf".format(str(epoch_idx)))) + ################################################################ + + print("Generating test set evaluation metrics") + self.load_model(model_save_dir=self.experiment_saved_models, model_idx=self.best_val_model_idx, + # load best validation model + model_save_name="train_model") + current_epoch_losses = {"test_acc": [], "test_loss": []} # initialize a statistics dict + with tqdm.tqdm(total=len(self.test_data)) as pbar_test: # ini a progress bar + for x, y in self.test_data: # sample batch + loss, accuracy = self.run_evaluation_iter(x=x, + y=y) # compute loss and accuracy by running an evaluation step + current_epoch_losses["test_loss"].append(loss) # save test loss + current_epoch_losses["test_acc"].append(accuracy) # save test accuracy + pbar_test.update(1) # update progress bar status + pbar_test.set_description( + "loss: {:.4f}, accuracy: {:.4f}".format(loss, accuracy)) # update progress bar string output + + test_losses = {key: [np.mean(value)] for key, value in + current_epoch_losses.items()} # save test set metrics in dict format + save_statistics(experiment_log_dir=self.experiment_logs, filename='test_summary.csv', + # save test set metrics on disk in .csv format + 
stats_dict=test_losses, current_epoch=0, continue_from_mode=False) + + return total_losses, test_losses diff --git a/pytorch_mlp_framework/model_architectures.py b/pytorch_mlp_framework/model_architectures.py new file mode 100644 index 00000000..cfa991de --- /dev/null +++ b/pytorch_mlp_framework/model_architectures.py @@ -0,0 +1,340 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class FCCNetwork(nn.Module): + def __init__(self, input_shape, num_output_classes, num_filters, num_layers, use_bias=False): + """ + Initializes a fully connected network similar to the ones implemented previously in the MLP package. + :param input_shape: The shape of the inputs going in to the network. + :param num_output_classes: The number of outputs the network should have (for classification those would be the number of classes) + :param num_filters: Number of filters used in every fcc layer. + :param num_layers: Number of fcc layers (excluding dim reduction stages) + :param use_bias: Whether our fcc layers will use a bias. 
+ """ + super(FCCNetwork, self).__init__() + # set up class attributes useful in building the network and inference + self.input_shape = input_shape + self.num_filters = num_filters + self.num_output_classes = num_output_classes + self.use_bias = use_bias + self.num_layers = num_layers + # initialize a module dict, which is effectively a dictionary that can collect layers and integrate them into pytorch + self.layer_dict = nn.ModuleDict() + # build the network + self.build_module() + + def build_module(self): + print("Building basic block of FCCNetwork using input shape", self.input_shape) + x = torch.zeros((self.input_shape)) + + out = x + out = out.view(out.shape[0], -1) + # flatten inputs to shape (b, -1) where -1 is the dim resulting from multiplying the + # shapes of all dimensions after the 0th dim + + for i in range(self.num_layers): + self.layer_dict['fcc_{}'.format(i)] = nn.Linear(in_features=out.shape[1], # initialize a fcc layer + out_features=self.num_filters, + bias=self.use_bias) + + out = self.layer_dict['fcc_{}'.format(i)](out) # apply ith fcc layer to the previous layers outputs + out = F.relu(out) # apply a ReLU on the outputs + + self.logits_linear_layer = nn.Linear(in_features=out.shape[1], # initialize the prediction output linear layer + out_features=self.num_output_classes, + bias=self.use_bias) + out = self.logits_linear_layer(out) # apply the layer to the previous layer's outputs + print("Block is built, output volume is", out.shape) + return out + + def forward(self, x): + """ + Forward prop data through the network and return the preds + :param x: Input batch x a batch of shape batch number of samples, each of any dimensionality. 
+ :return: preds of shape (b, num_classes) + """ + out = x + out = out.view(out.shape[0], -1) + # flatten inputs to shape (b, -1) where -1 is the dim resulting from multiplying the + # shapes of all dimensions after the 0th dim + + for i in range(self.num_layers): + out = self.layer_dict['fcc_{}'.format(i)](out) # apply ith fcc layer to the previous layers outputs + out = F.relu(out) # apply a ReLU on the outputs + + out = self.logits_linear_layer(out) # apply the layer to the previous layer's outputs + return out + + def reset_parameters(self): + """ + Re-initializes the networks parameters + """ + for item in self.layer_dict.children(): + item.reset_parameters() + + self.logits_linear_layer.reset_parameters() + + +class EmptyBlock(nn.Module): + def __init__(self, input_shape=None, num_filters=None, kernel_size=None, padding=None, bias=None, dilation=None, + reduction_factor=None): + super(EmptyBlock, self).__init__() + + self.num_filters = num_filters + self.kernel_size = kernel_size + self.input_shape = input_shape + self.padding = padding + self.bias = bias + self.dilation = dilation + + self.build_module() + + def build_module(self): + self.layer_dict = nn.ModuleDict() + x = torch.zeros(self.input_shape) + self.layer_dict['Identity'] = nn.Identity() + + def forward(self, x): + out = x + + out = self.layer_dict['Identity'].forward(out) + + return out + + +class EntryConvolutionalBlock(nn.Module): + def __init__(self, input_shape, num_filters, kernel_size, padding, bias, dilation): + super(EntryConvolutionalBlock, self).__init__() + + self.num_filters = num_filters + self.kernel_size = kernel_size + self.input_shape = input_shape + self.padding = padding + self.bias = bias + self.dilation = dilation + + self.build_module() + + def build_module(self): + self.layer_dict = nn.ModuleDict() + x = torch.zeros(self.input_shape) + out = x + + self.layer_dict['conv_0'] = nn.Conv2d(in_channels=out.shape[1], out_channels=self.num_filters, bias=self.bias, + 
kernel_size=self.kernel_size, dilation=self.dilation, + padding=self.padding, stride=1) + + out = self.layer_dict['conv_0'].forward(out) + self.layer_dict['bn_0'] = nn.BatchNorm2d(num_features=out.shape[1]) + out = F.leaky_relu(self.layer_dict['bn_0'].forward(out)) + + print(out.shape) + + def forward(self, x): + out = x + + out = self.layer_dict['conv_0'].forward(out) + out = F.leaky_relu(self.layer_dict['bn_0'].forward(out)) + + return out + + +class ConvolutionalProcessingBlock(nn.Module): + def __init__(self, input_shape, num_filters, kernel_size, padding, bias, dilation): + super(ConvolutionalProcessingBlock, self).__init__() + + self.num_filters = num_filters + self.kernel_size = kernel_size + self.input_shape = input_shape + self.padding = padding + self.bias = bias + self.dilation = dilation + + self.build_module() + + def build_module(self): + self.layer_dict = nn.ModuleDict() + x = torch.zeros(self.input_shape) + out = x + + self.layer_dict['conv_0'] = nn.Conv2d(in_channels=out.shape[1], out_channels=self.num_filters, bias=self.bias, + kernel_size=self.kernel_size, dilation=self.dilation, + padding=self.padding, stride=1) + + out = self.layer_dict['conv_0'].forward(out) + out = F.leaky_relu(out) + + self.layer_dict['conv_1'] = nn.Conv2d(in_channels=out.shape[1], out_channels=self.num_filters, bias=self.bias, + kernel_size=self.kernel_size, dilation=self.dilation, + padding=self.padding, stride=1) + + out = self.layer_dict['conv_1'].forward(out) + out = F.leaky_relu(out) + + print(out.shape) + + def forward(self, x): + out = x + + out = self.layer_dict['conv_0'].forward(out) + out = F.leaky_relu(out) + + out = self.layer_dict['conv_1'].forward(out) + out = F.leaky_relu(out) + + return out + + +class ConvolutionalDimensionalityReductionBlock(nn.Module): + def __init__(self, input_shape, num_filters, kernel_size, padding, bias, dilation, reduction_factor): + super(ConvolutionalDimensionalityReductionBlock, self).__init__() + + self.num_filters = num_filters 
+ self.kernel_size = kernel_size + self.input_shape = input_shape + self.padding = padding + self.bias = bias + self.dilation = dilation + self.reduction_factor = reduction_factor + self.build_module() + + def build_module(self): + self.layer_dict = nn.ModuleDict() + x = torch.zeros(self.input_shape) + out = x + + self.layer_dict['conv_0'] = nn.Conv2d(in_channels=out.shape[1], out_channels=self.num_filters, bias=self.bias, + kernel_size=self.kernel_size, dilation=self.dilation, + padding=self.padding, stride=1) + + out = self.layer_dict['conv_0'].forward(out) + out = F.leaky_relu(out) + + out = F.avg_pool2d(out, self.reduction_factor) + + self.layer_dict['conv_1'] = nn.Conv2d(in_channels=out.shape[1], out_channels=self.num_filters, bias=self.bias, + kernel_size=self.kernel_size, dilation=self.dilation, + padding=self.padding, stride=1) + + out = self.layer_dict['conv_1'].forward(out) + out = F.leaky_relu(out) + + print(out.shape) + + def forward(self, x): + out = x + + out = self.layer_dict['conv_0'].forward(out) + out = F.leaky_relu(out) + + out = F.avg_pool2d(out, self.reduction_factor) + + out = self.layer_dict['conv_1'].forward(out) + out = F.leaky_relu(out) + + return out + + +class ConvolutionalNetwork(nn.Module): + def __init__(self, input_shape, num_output_classes, num_filters, + num_blocks_per_stage, num_stages, use_bias=False, processing_block_type=ConvolutionalProcessingBlock, + dimensionality_reduction_block_type=ConvolutionalDimensionalityReductionBlock): + """ + Initializes a convolutional network module + :param input_shape: The shape of the tensor to be passed into this network + :param num_output_classes: Number of output classes + :param num_filters: Number of filters per convolutional layer + :param num_blocks_per_stage: Number of blocks per "stage". Each block is composed of 2 convolutional layers. + :param num_stages: Number of stages in a network. 
A stage is defined as a sequence of layers within which the
+        data dimensionality remains constant in the spatial axis (h, w) and can change in the channel axis. After each stage
+        there exists a dimensionality reduction stage, composed of two convolutional layers and an avg pooling layer.
+        :param use_bias: Whether to use biases in our convolutional layers
+        :param processing_block_type: Type of processing block to use within our stages
+        :param dimensionality_reduction_block_type: Type of dimensionality reduction block to use after each stage in our network
+        """
+        super(ConvolutionalNetwork, self).__init__()
+        # set up class attributes useful in building the network and inference
+        self.input_shape = input_shape
+        self.num_filters = num_filters
+        self.num_output_classes = num_output_classes
+        self.use_bias = use_bias
+        self.num_blocks_per_stage = num_blocks_per_stage
+        self.num_stages = num_stages
+        self.processing_block_type = processing_block_type
+        self.dimensionality_reduction_block_type = dimensionality_reduction_block_type
+
+        # build the network
+        self.build_module()
+
+    def build_module(self):
+        """
+        Builds network whilst automatically inferring shapes of layers.
+ """ + self.layer_dict = nn.ModuleDict() + # initialize a module dict, which is effectively a dictionary that can collect layers and integrate them into pytorch + print("Building basic block of ConvolutionalNetwork using input shape", self.input_shape) + x = torch.zeros((self.input_shape)) # create dummy inputs to be used to infer shapes of layers + + out = x + self.layer_dict['input_conv'] = EntryConvolutionalBlock(input_shape=out.shape, num_filters=self.num_filters, + kernel_size=3, padding=1, bias=self.use_bias, + dilation=1) + out = self.layer_dict['input_conv'].forward(out) + # torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True) + for i in range(self.num_stages): # for number of layers times + for j in range(self.num_blocks_per_stage): + self.layer_dict['block_{}_{}'.format(i, j)] = self.processing_block_type(input_shape=out.shape, + num_filters=self.num_filters, + bias=self.use_bias, + kernel_size=3, dilation=1, + padding=1) + out = self.layer_dict['block_{}_{}'.format(i, j)].forward(out) + self.layer_dict['reduction_block_{}'.format(i)] = self.dimensionality_reduction_block_type( + input_shape=out.shape, + num_filters=self.num_filters, bias=True, + kernel_size=3, dilation=1, + padding=1, + reduction_factor=2) + out = self.layer_dict['reduction_block_{}'.format(i)].forward(out) + + out = F.avg_pool2d(out, out.shape[-1]) + print('shape before final linear layer', out.shape) + out = out.view(out.shape[0], -1) + self.logit_linear_layer = nn.Linear(in_features=out.shape[1], # add a linear layer + out_features=self.num_output_classes, + bias=True) + out = self.logit_linear_layer(out) # apply linear layer on flattened inputs + print("Block is built, output volume is", out.shape) + return out + + def forward(self, x): + """ + Forward propages the network given an input batch + :param x: Inputs x (b, c, h, w) + :return: preds (b, num_classes) + """ + out = x + out = self.layer_dict['input_conv'].forward(out) 
+ for i in range(self.num_stages): # for number of layers times + for j in range(self.num_blocks_per_stage): + out = self.layer_dict['block_{}_{}'.format(i, j)].forward(out) + out = self.layer_dict['reduction_block_{}'.format(i)].forward(out) + + out = F.avg_pool2d(out, out.shape[-1]) + out = out.view(out.shape[0], -1) # flatten outputs from (b, c, h, w) to (b, c*h*w) + out = self.logit_linear_layer(out) # pass through a linear layer to get logits/preds + return out + + def reset_parameters(self): + """ + Re-initialize the network parameters. + """ + for item in self.layer_dict.children(): + try: + item.reset_parameters() + except: + pass + + self.logit_linear_layer.reset_parameters() diff --git a/pytorch_mlp_framework/storage_utils.py b/pytorch_mlp_framework/storage_utils.py new file mode 100644 index 00000000..33fafdc3 --- /dev/null +++ b/pytorch_mlp_framework/storage_utils.py @@ -0,0 +1,70 @@ +import pickle +import os +import csv + + +def save_to_stats_pkl_file(experiment_log_filepath, filename, stats_dict): + summary_filename = os.path.join(experiment_log_filepath, filename) + with open("{}.pkl".format(summary_filename), "wb") as file_writer: + pickle.dump(stats_dict, file_writer) + + +def load_from_stats_pkl_file(experiment_log_filepath, filename): + summary_filename = os.path.join(experiment_log_filepath, filename) + with open("{}.pkl".format(summary_filename), "rb") as file_reader: + stats = pickle.load(file_reader) + + return stats + + +def save_statistics(experiment_log_dir, filename, stats_dict, current_epoch, continue_from_mode=False, save_full_dict=False): + """ + Saves the statistics in stats dict into a csv file. 
Using the keys as the header entries and the values as the + columns of a particular header entry + :param experiment_log_dir: the log folder dir filepath + :param filename: the name of the csv file + :param stats_dict: the stats dict containing the data to be saved + :param current_epoch: the number of epochs since commencement of the current training session (i.e. if the experiment continued from 100 and this is epoch 105, then pass relative distance of 5.) + :param save_full_dict: whether to save the full dict as is overriding any previous entries (might be useful if we want to overwrite a file) + :return: The filepath to the summary file + """ + summary_filename = os.path.join(experiment_log_dir, filename) + mode = 'a' if continue_from_mode else 'w' + with open(summary_filename, mode) as f: + writer = csv.writer(f) + if not continue_from_mode: + writer.writerow(list(stats_dict.keys())) + + if save_full_dict: + total_rows = len(list(stats_dict.values())[0]) + for idx in range(total_rows): + row_to_add = [value[idx] for value in list(stats_dict.values())] + writer.writerow(row_to_add) + else: + row_to_add = [value[current_epoch] for value in list(stats_dict.values())] + writer.writerow(row_to_add) + + return summary_filename + + +def load_statistics(experiment_log_dir, filename): + """ + Loads a statistics csv file into a dictionary + :param experiment_log_dir: the log folder dir filepath + :param filename: the name of the csv file to load + :return: A dictionary containing the stats in the csv file. Header entries are converted into keys and columns of a + particular header are converted into values of a key in a list format. 
+ """ + summary_filename = os.path.join(experiment_log_dir, filename) + + with open(summary_filename, 'r+') as f: + lines = f.readlines() + + keys = lines[0].split(",") + stats = {key: [] for key in keys} + for line in lines[1:]: + values = line.split(",") + for idx, value in enumerate(values): + stats[keys[idx]].append(value) + + return stats diff --git a/pytorch_mlp_framework/train_evaluate_image_classification_system.py b/pytorch_mlp_framework/train_evaluate_image_classification_system.py new file mode 100644 index 00000000..a8b49957 --- /dev/null +++ b/pytorch_mlp_framework/train_evaluate_image_classification_system.py @@ -0,0 +1,68 @@ +import numpy as np +import torch +from torch.utils.data import DataLoader +from torchvision import transforms + +import mlp.data_providers as data_providers +from pytorch_mlp_framework.arg_extractor import get_args +from pytorch_mlp_framework.experiment_builder import ExperimentBuilder +from pytorch_mlp_framework.model_architectures import * +import os +# os.environ["CUDA_VISIBLE_DEVICES"]="0" + +args = get_args() # get arguments from command line +rng = np.random.RandomState(seed=args.seed) # set the seeds for the experiment +torch.manual_seed(seed=args.seed) # sets pytorch's seed + +# set up data augmentation transforms for training and testing +transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + +transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +train_data = data_providers.CIFAR100(root='data', set_name='train', + transform=transform_train, + download=True) # initialize our rngs using the argument set seed +val_data = data_providers.CIFAR100(root='data', set_name='val', + transform=transform_test, + download=True) # initialize our rngs using the argument set seed 
+test_data = data_providers.CIFAR100(root='data', set_name='test', + transform=transform_test, + download=True) # initialize our rngs using the argument set seed + +train_data_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=4) +val_data_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=True, num_workers=4) +test_data_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=True, num_workers=4) + +if args.block_type == 'conv_block': + processing_block_type = ConvolutionalProcessingBlock + dim_reduction_block_type = ConvolutionalDimensionalityReductionBlock +elif args.block_type == 'empty_block': + processing_block_type = EmptyBlock + dim_reduction_block_type = EmptyBlock +else: + raise ModuleNotFoundError + +custom_conv_net = ConvolutionalNetwork( # initialize our network object, in this case a ConvNet + input_shape=(args.batch_size, args.image_num_channels, args.image_height, args.image_width), + num_output_classes=args.num_classes, num_filters=args.num_filters, use_bias=False, + num_blocks_per_stage=args.num_blocks_per_stage, num_stages=args.num_stages, + processing_block_type=processing_block_type, + dimensionality_reduction_block_type=dim_reduction_block_type) + +conv_experiment = ExperimentBuilder(network_model=custom_conv_net, + experiment_name=args.experiment_name, + num_epochs=args.num_epochs, + weight_decay_coefficient=args.weight_decay_coefficient, + use_gpu=args.use_gpu, + continue_from_epoch=args.continue_from_epoch, + train_data=train_data_loader, val_data=val_data_loader, + test_data=test_data_loader) # build an experiment object +experiment_metrics, test_metrics = conv_experiment.run_experiment() # run experiment and return experiment metrics diff --git a/report/README.txt b/report/README.txt new file mode 100644 index 00000000..8d6cff60 --- /dev/null +++ b/report/README.txt @@ -0,0 +1 @@ +Most reasonable LaTeX distributions should have no problem building the document from what is in the 
provided LaTeX source directory. However certain LaTeX distributions are missing certain files, and the they are included in this directory. If you get an error message when you build the LaTeX document saying one of these files is missing, then move the relevant file into your latex source directory. diff --git a/report/additional-latex-files/README.txt b/report/additional-latex-files/README.txt new file mode 100644 index 00000000..8d6cff60 --- /dev/null +++ b/report/additional-latex-files/README.txt @@ -0,0 +1 @@ +Most reasonable LaTeX distributions should have no problem building the document from what is in the provided LaTeX source directory. However certain LaTeX distributions are missing certain files, and the they are included in this directory. If you get an error message when you build the LaTeX document saying one of these files is missing, then move the relevant file into your latex source directory. diff --git a/report/additional-latex-files/algorithm.sty b/report/additional-latex-files/algorithm.sty new file mode 100644 index 00000000..843e3d5b --- /dev/null +++ b/report/additional-latex-files/algorithm.sty @@ -0,0 +1,79 @@ +% ALGORITHM STYLE -- Released 8 April 1996 +% for LaTeX-2e +% Copyright -- 1994 Peter Williams +% E-mail Peter.Williams@dsto.defence.gov.au +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{algorithm} +\typeout{Document Style `algorithm' - floating environment} + +\RequirePackage{float} +\RequirePackage{ifthen} +\newcommand{\ALG@within}{nothing} +\newboolean{ALG@within} +\setboolean{ALG@within}{false} +\newcommand{\ALG@floatstyle}{ruled} +\newcommand{\ALG@name}{Algorithm} +\newcommand{\listalgorithmname}{List of \ALG@name s} + +% Declare Options +% first appearance +\DeclareOption{plain}{ + \renewcommand{\ALG@floatstyle}{plain} +} +\DeclareOption{ruled}{ + \renewcommand{\ALG@floatstyle}{ruled} +} +\DeclareOption{boxed}{ + \renewcommand{\ALG@floatstyle}{boxed} +} +% then numbering convention +\DeclareOption{part}{ + 
\renewcommand{\ALG@within}{part} + \setboolean{ALG@within}{true} +} +\DeclareOption{chapter}{ + \renewcommand{\ALG@within}{chapter} + \setboolean{ALG@within}{true} +} +\DeclareOption{section}{ + \renewcommand{\ALG@within}{section} + \setboolean{ALG@within}{true} +} +\DeclareOption{subsection}{ + \renewcommand{\ALG@within}{subsection} + \setboolean{ALG@within}{true} +} +\DeclareOption{subsubsection}{ + \renewcommand{\ALG@within}{subsubsection} + \setboolean{ALG@within}{true} +} +\DeclareOption{nothing}{ + \renewcommand{\ALG@within}{nothing} + \setboolean{ALG@within}{true} +} +\DeclareOption*{\edef\ALG@name{\CurrentOption}} + +% ALGORITHM +% +\ProcessOptions +\floatstyle{\ALG@floatstyle} +\ifthenelse{\boolean{ALG@within}}{ + \ifthenelse{\equal{\ALG@within}{part}} + {\newfloat{algorithm}{htbp}{loa}[part]}{} + \ifthenelse{\equal{\ALG@within}{chapter}} + {\newfloat{algorithm}{htbp}{loa}[chapter]}{} + \ifthenelse{\equal{\ALG@within}{section}} + {\newfloat{algorithm}{htbp}{loa}[section]}{} + \ifthenelse{\equal{\ALG@within}{subsection}} + {\newfloat{algorithm}{htbp}{loa}[subsection]}{} + \ifthenelse{\equal{\ALG@within}{subsubsection}} + {\newfloat{algorithm}{htbp}{loa}[subsubsection]}{} + \ifthenelse{\equal{\ALG@within}{nothing}} + {\newfloat{algorithm}{htbp}{loa}}{} +}{ + \newfloat{algorithm}{htbp}{loa} +} +\floatname{algorithm}{\ALG@name} + +\newcommand{\listofalgorithms}{\listof{algorithm}{\listalgorithmname}} + diff --git a/report/additional-latex-files/algorithmic.sty b/report/additional-latex-files/algorithmic.sty new file mode 100644 index 00000000..ad614783 --- /dev/null +++ b/report/additional-latex-files/algorithmic.sty @@ -0,0 +1,201 @@ +% ALGORITHMIC STYLE -- Released 8 APRIL 1996 +% for LaTeX version 2e +% Copyright -- 1994 Peter Williams +% E-mail PeterWilliams@dsto.defence.gov.au +% +% Modified by Alex Smola (08/2000) +% E-mail Alex.Smola@anu.edu.au +% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{algorithmic} +\typeout{Document Style `algorithmic' - 
environment} +% +\RequirePackage{ifthen} +\RequirePackage{calc} +\newboolean{ALC@noend} +\setboolean{ALC@noend}{false} +\newcounter{ALC@line} +\newcounter{ALC@rem} +\newlength{\ALC@tlm} +% +\DeclareOption{noend}{\setboolean{ALC@noend}{true}} +% +\ProcessOptions +% +% ALGORITHMIC +\newcommand{\algorithmicrequire}{\textbf{Require:}} +\newcommand{\algorithmicensure}{\textbf{Ensure:}} +\newcommand{\algorithmiccomment}[1]{\{#1\}} +\newcommand{\algorithmicend}{\textbf{end}} +\newcommand{\algorithmicif}{\textbf{if}} +\newcommand{\algorithmicthen}{\textbf{then}} +\newcommand{\algorithmicelse}{\textbf{else}} +\newcommand{\algorithmicelsif}{\algorithmicelse\ \algorithmicif} +\newcommand{\algorithmicendif}{\algorithmicend\ \algorithmicif} +\newcommand{\algorithmicfor}{\textbf{for}} +\newcommand{\algorithmicforall}{\textbf{for all}} +\newcommand{\algorithmicdo}{\textbf{do}} +\newcommand{\algorithmicendfor}{\algorithmicend\ \algorithmicfor} +\newcommand{\algorithmicwhile}{\textbf{while}} +\newcommand{\algorithmicendwhile}{\algorithmicend\ \algorithmicwhile} +\newcommand{\algorithmicloop}{\textbf{loop}} +\newcommand{\algorithmicendloop}{\algorithmicend\ \algorithmicloop} +\newcommand{\algorithmicrepeat}{\textbf{repeat}} +\newcommand{\algorithmicuntil}{\textbf{until}} + +%changed by alex smola +\newcommand{\algorithmicinput}{\textbf{input}} +\newcommand{\algorithmicoutput}{\textbf{output}} +\newcommand{\algorithmicset}{\textbf{set}} +\newcommand{\algorithmictrue}{\textbf{true}} +\newcommand{\algorithmicfalse}{\textbf{false}} +\newcommand{\algorithmicand}{\textbf{and\ }} +\newcommand{\algorithmicor}{\textbf{or\ }} +\newcommand{\algorithmicfunction}{\textbf{function}} +\newcommand{\algorithmicendfunction}{\algorithmicend\ \algorithmicfunction} +\newcommand{\algorithmicmain}{\textbf{main}} +\newcommand{\algorithmicendmain}{\algorithmicend\ \algorithmicmain} +%end changed by alex smola + +\def\ALC@item[#1]{% +\if@noparitem \@donoparitem + \else \if@inlabel \indent \par \fi + \ifhmode 
\unskip\unskip \par \fi + \if@newlist \if@nobreak \@nbitem \else + \addpenalty\@beginparpenalty + \addvspace\@topsep \addvspace{-\parskip}\fi + \else \addpenalty\@itempenalty \addvspace\itemsep + \fi + \global\@inlabeltrue +\fi +\everypar{\global\@minipagefalse\global\@newlistfalse + \if@inlabel\global\@inlabelfalse \hskip -\parindent \box\@labels + \penalty\z@ \fi + \everypar{}}\global\@nobreakfalse +\if@noitemarg \@noitemargfalse \if@nmbrlist \refstepcounter{\@listctr}\fi \fi +\sbox\@tempboxa{\makelabel{#1}}% +\global\setbox\@labels + \hbox{\unhbox\@labels \hskip \itemindent + \hskip -\labelwidth \hskip -\ALC@tlm + \ifdim \wd\@tempboxa >\labelwidth + \box\@tempboxa + \else \hbox to\labelwidth {\unhbox\@tempboxa}\fi + \hskip \ALC@tlm}\ignorespaces} +% +\newenvironment{algorithmic}[1][0]{ +\let\@item\ALC@item + \newcommand{\ALC@lno}{% +\ifthenelse{\equal{\arabic{ALC@rem}}{0}} +{{\footnotesize \arabic{ALC@line}:}}{}% +} +\let\@listii\@listi +\let\@listiii\@listi +\let\@listiv\@listi +\let\@listv\@listi +\let\@listvi\@listi +\let\@listvii\@listi + \newenvironment{ALC@g}{ + \begin{list}{\ALC@lno}{ \itemsep\z@ \itemindent\z@ + \listparindent\z@ \rightmargin\z@ + \topsep\z@ \partopsep\z@ \parskip\z@\parsep\z@ + \leftmargin 1em + \addtolength{\ALC@tlm}{\leftmargin} + } + } + {\end{list}} + \newcommand{\ALC@it}{\addtocounter{ALC@line}{1}\addtocounter{ALC@rem}{1}\ifthenelse{\equal{\arabic{ALC@rem}}{#1}}{\setcounter{ALC@rem}{0}}{}\item} + \newcommand{\ALC@com}[1]{\ifthenelse{\equal{##1}{default}}% +{}{\ \algorithmiccomment{##1}}} + \newcommand{\REQUIRE}{\item[\algorithmicrequire]} + \newcommand{\ENSURE}{\item[\algorithmicensure]} + \newcommand{\STATE}{\ALC@it} + \newcommand{\COMMENT}[1]{\algorithmiccomment{##1}} +%changes by alex smola + \newcommand{\INPUT}{\item[\algorithmicinput]} + \newcommand{\OUTPUT}{\item[\algorithmicoutput]} + \newcommand{\SET}{\item[\algorithmicset]} +% \newcommand{\TRUE}{\algorithmictrue} +% \newcommand{\FALSE}{\algorithmicfalse} + 
\newcommand{\AND}{\algorithmicand} + \newcommand{\OR}{\algorithmicor} + \newenvironment{ALC@func}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@main}{\begin{ALC@g}}{\end{ALC@g}} +%end changes by alex smola + \newenvironment{ALC@if}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@for}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@whl}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@loop}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@rpt}{\begin{ALC@g}}{\end{ALC@g}} + \renewcommand{\\}{\@centercr} + \newcommand{\IF}[2][default]{\ALC@it\algorithmicif\ ##2\ \algorithmicthen% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\SHORTIF}[2]{\ALC@it\algorithmicif\ ##1\ + \algorithmicthen\ {##2}} + \newcommand{\ELSE}[1][default]{\end{ALC@if}\ALC@it\algorithmicelse% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\ELSIF}[2][default]% +{\end{ALC@if}\ALC@it\algorithmicelsif\ ##2\ \algorithmicthen% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\FOR}[2][default]{\ALC@it\algorithmicfor\ ##2\ \algorithmicdo% +\ALC@com{##1}\begin{ALC@for}} + \newcommand{\FORALL}[2][default]{\ALC@it\algorithmicforall\ ##2\ % +\algorithmicdo% +\ALC@com{##1}\begin{ALC@for}} + \newcommand{\SHORTFORALL}[2]{\ALC@it\algorithmicforall\ ##1\ % + \algorithmicdo\ {##2}} + \newcommand{\WHILE}[2][default]{\ALC@it\algorithmicwhile\ ##2\ % +\algorithmicdo% +\ALC@com{##1}\begin{ALC@whl}} + \newcommand{\LOOP}[1][default]{\ALC@it\algorithmicloop% +\ALC@com{##1}\begin{ALC@loop}} +%changed by alex smola + \newcommand{\FUNCTION}[2][default]{\ALC@it\algorithmicfunction\ ##2\ % + \ALC@com{##1}\begin{ALC@func}} + \newcommand{\MAIN}[2][default]{\ALC@it\algorithmicmain\ ##2\ % + \ALC@com{##1}\begin{ALC@main}} +%end changed by alex smola + \newcommand{\REPEAT}[1][default]{\ALC@it\algorithmicrepeat% + \ALC@com{##1}\begin{ALC@rpt}} + \newcommand{\UNTIL}[1]{\end{ALC@rpt}\ALC@it\algorithmicuntil\ ##1} + \ifthenelse{\boolean{ALC@noend}}{ + \newcommand{\ENDIF}{\end{ALC@if}} + \newcommand{\ENDFOR}{\end{ALC@for}} + 
\newcommand{\ENDWHILE}{\end{ALC@whl}} + \newcommand{\ENDLOOP}{\end{ALC@loop}} + \newcommand{\ENDFUNCTION}{\end{ALC@func}} + \newcommand{\ENDMAIN}{\end{ALC@main}} + }{ + \newcommand{\ENDIF}{\end{ALC@if}\ALC@it\algorithmicendif} + \newcommand{\ENDFOR}{\end{ALC@for}\ALC@it\algorithmicendfor} + \newcommand{\ENDWHILE}{\end{ALC@whl}\ALC@it\algorithmicendwhile} + \newcommand{\ENDLOOP}{\end{ALC@loop}\ALC@it\algorithmicendloop} + \newcommand{\ENDFUNCTION}{\end{ALC@func}\ALC@it\algorithmicendfunction} + \newcommand{\ENDMAIN}{\end{ALC@main}\ALC@it\algorithmicendmain} + } + \renewcommand{\@toodeep}{} + \begin{list}{\ALC@lno}{\setcounter{ALC@line}{0}\setcounter{ALC@rem}{0}% + \itemsep\z@ \itemindent\z@ \listparindent\z@% + \partopsep\z@ \parskip\z@ \parsep\z@% + \labelsep 0.5em \topsep 0.2em% + \ifthenelse{\equal{#1}{0}} + {\labelwidth 0.5em } + {\labelwidth 1.2em } + \leftmargin\labelwidth \addtolength{\leftmargin}{\labelsep} + \ALC@tlm\labelsep + } + } + {\end{list}} + + + + + + + + + + + + + + diff --git a/report/additional-latex-files/fancyhdr.sty b/report/additional-latex-files/fancyhdr.sty new file mode 100644 index 00000000..77ed4e30 --- /dev/null +++ b/report/additional-latex-files/fancyhdr.sty @@ -0,0 +1,485 @@ +% fancyhdr.sty version 3.2 +% Fancy headers and footers for LaTeX. +% Piet van Oostrum, +% Dept of Computer and Information Sciences, University of Utrecht, +% Padualaan 14, P.O. Box 80.089, 3508 TB Utrecht, The Netherlands +% Telephone: +31 30 2532180. Email: piet@cs.uu.nl +% ======================================================================== +% LICENCE: +% This file may be distributed under the terms of the LaTeX Project Public +% License, as described in lppl.txt in the base LaTeX distribution. +% Either version 1 or, at your option, any later version. 
+% ======================================================================== +% MODIFICATION HISTORY: +% Sep 16, 1994 +% version 1.4: Correction for use with \reversemargin +% Sep 29, 1994: +% version 1.5: Added the \iftopfloat, \ifbotfloat and \iffloatpage commands +% Oct 4, 1994: +% version 1.6: Reset single spacing in headers/footers for use with +% setspace.sty or doublespace.sty +% Oct 4, 1994: +% version 1.7: changed \let\@mkboth\markboth to +% \def\@mkboth{\protect\markboth} to make it more robust +% Dec 5, 1994: +% version 1.8: corrections for amsbook/amsart: define \@chapapp and (more +% importantly) use the \chapter/sectionmark definitions from ps@headings if +% they exist (which should be true for all standard classes). +% May 31, 1995: +% version 1.9: The proposed \renewcommand{\headrulewidth}{\iffloatpage... +% construction in the doc did not work properly with the fancyplain style. +% June 1, 1995: +% version 1.91: The definition of \@mkboth wasn't restored on subsequent +% \pagestyle{fancy}'s. +% June 1, 1995: +% version 1.92: The sequence \pagestyle{fancyplain} \pagestyle{plain} +% \pagestyle{fancy} would erroneously select the plain version. +% June 1, 1995: +% version 1.93: \fancypagestyle command added. +% Dec 11, 1995: +% version 1.94: suggested by Conrad Hughes +% CJCH, Dec 11, 1995: added \footruleskip to allow control over footrule +% position (old hardcoded value of .3\normalbaselineskip is far too high +% when used with very small footer fonts). +% Jan 31, 1996: +% version 1.95: call \@normalsize in the reset code if that is defined, +% otherwise \normalsize. +% this is to solve a problem with ucthesis.cls, as this doesn't +% define \@currsize. Unfortunately for latex209 calling \normalsize doesn't +% work as this is optimized to do very little, so there \@normalsize should +% be called. Hopefully this code works for all versions of LaTeX known to +% mankind. 
+% April 25, 1996: +% version 1.96: initialize \headwidth to a magic (negative) value to catch +% most common cases that people change it before calling \pagestyle{fancy}. +% Note it can't be initialized when reading in this file, because +% \textwidth could be changed afterwards. This is quite probable. +% We also switch to \MakeUppercase rather than \uppercase and introduce a +% \nouppercase command for use in headers. and footers. +% May 3, 1996: +% version 1.97: Two changes: +% 1. Undo the change in version 1.8 (using the pagestyle{headings} defaults +% for the chapter and section marks. The current version of amsbook and +% amsart classes don't seem to need them anymore. Moreover the standard +% latex classes don't use \markboth if twoside isn't selected, and this is +% confusing as \leftmark doesn't work as expected. +% 2. include a call to \ps@empty in ps@@fancy. This is to solve a problem +% in the amsbook and amsart classes, that make global changes to \topskip, +% which are reset in \ps@empty. Hopefully this doesn't break other things. +% May 7, 1996: +% version 1.98: +% Added % after the line \def\nouppercase +% May 7, 1996: +% version 1.99: This is the alpha version of fancyhdr 2.0 +% Introduced the new commands \fancyhead, \fancyfoot, and \fancyhf. +% Changed \headrulewidth, \footrulewidth, \footruleskip to +% macros rather than length parameters, In this way they can be +% conditionalized and they don't consume length registers. There is no need +% to have them as length registers unless you want to do calculations with +% them, which is unlikely. Note that this may make some uses of them +% incompatible (i.e. if you have a file that uses \setlength or \xxxx=) +% May 10, 1996: +% version 1.99a: +% Added a few more % signs +% May 10, 1996: +% version 1.99b: +% Changed the syntax of \f@nfor to be resistent to catcode changes of := +% Removed the [1] from the defs of \lhead etc. because the parameter is +% consumed by the \@[xy]lhead etc. macros. 
+% June 24, 1997: +% version 1.99c: +% corrected \nouppercase to also include the protected form of \MakeUppercase +% \global added to manipulation of \headwidth. +% \iffootnote command added. +% Some comments added about \@fancyhead and \@fancyfoot. +% Aug 24, 1998 +% version 1.99d +% Changed the default \ps@empty to \ps@@empty in order to allow +% \fancypagestyle{empty} redefinition. +% Oct 11, 2000 +% version 2.0 +% Added LPPL license clause. +% +% A check for \headheight is added. An errormessage is given (once) if the +% header is too large. Empty headers don't generate the error even if +% \headheight is very small or even 0pt. +% Warning added for the use of 'E' option when twoside option is not used. +% In this case the 'E' fields will never be used. +% +% Mar 10, 2002 +% version 2.1beta +% New command: \fancyhfoffset[place]{length} +% defines offsets to be applied to the header/footer to let it stick into +% the margins (if length > 0). +% place is like in fancyhead, except that only E,O,L,R can be used. +% This replaces the old calculation based on \headwidth and the marginpar +% area. +% \headwidth will be dynamically calculated in the headers/footers when +% this is used. +% +% Mar 26, 2002 +% version 2.1beta2 +% \fancyhfoffset now also takes h,f as possible letters in the argument to +% allow the header and footer widths to be different. +% New commands \fancyheadoffset and \fancyfootoffset added comparable to +% \fancyhead and \fancyfoot. +% Errormessages and warnings have been made more informative. +% +% Dec 9, 2002 +% version 2.1 +% The defaults for \footrulewidth, \plainheadrulewidth and +% \plainfootrulewidth are changed from \z@skip to 0pt. In this way when +% someone inadvertantly uses \setlength to change any of these, the value +% of \z@skip will not be changed, rather an errormessage will be given. 
+ +% March 3, 2004 +% Release of version 3.0 + +% Oct 7, 2004 +% version 3.1 +% Added '\endlinechar=13' to \fancy@reset to prevent problems with +% includegraphics in header when verbatiminput is active. + +% March 22, 2005 +% version 3.2 +% reset \everypar (the real one) in \fancy@reset because spanish.ldf does +% strange things with \everypar between << and >>. + +\def\ifancy@mpty#1{\def\temp@a{#1}\ifx\temp@a\@empty} + +\def\fancy@def#1#2{\ifancy@mpty{#2}\fancy@gbl\def#1{\leavevmode}\else + \fancy@gbl\def#1{#2\strut}\fi} + +\let\fancy@gbl\global + +\def\@fancyerrmsg#1{% + \ifx\PackageError\undefined + \errmessage{#1}\else + \PackageError{Fancyhdr}{#1}{}\fi} +\def\@fancywarning#1{% + \ifx\PackageWarning\undefined + \errmessage{#1}\else + \PackageWarning{Fancyhdr}{#1}{}\fi} + +% Usage: \@forc \var{charstring}{command to be executed for each char} +% This is similar to LaTeX's \@tfor, but expands the charstring. + +\def\@forc#1#2#3{\expandafter\f@rc\expandafter#1\expandafter{#2}{#3}} +\def\f@rc#1#2#3{\def\temp@ty{#2}\ifx\@empty\temp@ty\else + \f@@rc#1#2\f@@rc{#3}\fi} +\def\f@@rc#1#2#3\f@@rc#4{\def#1{#2}#4\f@rc#1{#3}{#4}} + +% Usage: \f@nfor\name:=list\do{body} +% Like LaTeX's \@for but an empty list is treated as a list with an empty +% element + +\newcommand{\f@nfor}[3]{\edef\@fortmp{#2}% + \expandafter\@forloop#2,\@nil,\@nil\@@#1{#3}} + +% Usage: \def@ult \cs{defaults}{argument} +% sets \cs to the characters from defaults appearing in argument +% or defaults if it would be empty. All characters are lowercased. 
+ +\newcommand\def@ult[3]{% + \edef\temp@a{\lowercase{\edef\noexpand\temp@a{#3}}}\temp@a + \def#1{}% + \@forc\tmpf@ra{#2}% + {\expandafter\if@in\tmpf@ra\temp@a{\edef#1{#1\tmpf@ra}}{}}% + \ifx\@empty#1\def#1{#2}\fi} +% +% \if@in +% +\newcommand{\if@in}[4]{% + \edef\temp@a{#2}\def\temp@b##1#1##2\temp@b{\def\temp@b{##1}}% + \expandafter\temp@b#2#1\temp@b\ifx\temp@a\temp@b #4\else #3\fi} + +\newcommand{\fancyhead}{\@ifnextchar[{\f@ncyhf\fancyhead h}% + {\f@ncyhf\fancyhead h[]}} +\newcommand{\fancyfoot}{\@ifnextchar[{\f@ncyhf\fancyfoot f}% + {\f@ncyhf\fancyfoot f[]}} +\newcommand{\fancyhf}{\@ifnextchar[{\f@ncyhf\fancyhf{}}% + {\f@ncyhf\fancyhf{}[]}} + +% New commands for offsets added + +\newcommand{\fancyheadoffset}{\@ifnextchar[{\f@ncyhfoffs\fancyheadoffset h}% + {\f@ncyhfoffs\fancyheadoffset h[]}} +\newcommand{\fancyfootoffset}{\@ifnextchar[{\f@ncyhfoffs\fancyfootoffset f}% + {\f@ncyhfoffs\fancyfootoffset f[]}} +\newcommand{\fancyhfoffset}{\@ifnextchar[{\f@ncyhfoffs\fancyhfoffset{}}% + {\f@ncyhfoffs\fancyhfoffset{}[]}} + +% The header and footer fields are stored in command sequences with +% names of the form: \f@ncy with for [eo], from [lcr] +% and from [hf]. 
+ +\def\f@ncyhf#1#2[#3]#4{% + \def\temp@c{}% + \@forc\tmpf@ra{#3}% + {\expandafter\if@in\tmpf@ra{eolcrhf,EOLCRHF}% + {}{\edef\temp@c{\temp@c\tmpf@ra}}}% + \ifx\@empty\temp@c\else + \@fancyerrmsg{Illegal char `\temp@c' in \string#1 argument: + [#3]}% + \fi + \f@nfor\temp@c{#3}% + {\def@ult\f@@@eo{eo}\temp@c + \if@twoside\else + \if\f@@@eo e\@fancywarning + {\string#1's `E' option without twoside option is useless}\fi\fi + \def@ult\f@@@lcr{lcr}\temp@c + \def@ult\f@@@hf{hf}{#2\temp@c}% + \@forc\f@@eo\f@@@eo + {\@forc\f@@lcr\f@@@lcr + {\@forc\f@@hf\f@@@hf + {\expandafter\fancy@def\csname + f@ncy\f@@eo\f@@lcr\f@@hf\endcsname + {#4}}}}}} + +\def\f@ncyhfoffs#1#2[#3]#4{% + \def\temp@c{}% + \@forc\tmpf@ra{#3}% + {\expandafter\if@in\tmpf@ra{eolrhf,EOLRHF}% + {}{\edef\temp@c{\temp@c\tmpf@ra}}}% + \ifx\@empty\temp@c\else + \@fancyerrmsg{Illegal char `\temp@c' in \string#1 argument: + [#3]}% + \fi + \f@nfor\temp@c{#3}% + {\def@ult\f@@@eo{eo}\temp@c + \if@twoside\else + \if\f@@@eo e\@fancywarning + {\string#1's `E' option without twoside option is useless}\fi\fi + \def@ult\f@@@lcr{lr}\temp@c + \def@ult\f@@@hf{hf}{#2\temp@c}% + \@forc\f@@eo\f@@@eo + {\@forc\f@@lcr\f@@@lcr + {\@forc\f@@hf\f@@@hf + {\expandafter\setlength\csname + f@ncyO@\f@@eo\f@@lcr\f@@hf\endcsname + {#4}}}}}% + \fancy@setoffs} + +% Fancyheadings version 1 commands. These are more or less deprecated, +% but they continue to work. 
+ +\newcommand{\lhead}{\@ifnextchar[{\@xlhead}{\@ylhead}} +\def\@xlhead[#1]#2{\fancy@def\f@ncyelh{#1}\fancy@def\f@ncyolh{#2}} +\def\@ylhead#1{\fancy@def\f@ncyelh{#1}\fancy@def\f@ncyolh{#1}} + +\newcommand{\chead}{\@ifnextchar[{\@xchead}{\@ychead}} +\def\@xchead[#1]#2{\fancy@def\f@ncyech{#1}\fancy@def\f@ncyoch{#2}} +\def\@ychead#1{\fancy@def\f@ncyech{#1}\fancy@def\f@ncyoch{#1}} + +\newcommand{\rhead}{\@ifnextchar[{\@xrhead}{\@yrhead}} +\def\@xrhead[#1]#2{\fancy@def\f@ncyerh{#1}\fancy@def\f@ncyorh{#2}} +\def\@yrhead#1{\fancy@def\f@ncyerh{#1}\fancy@def\f@ncyorh{#1}} + +\newcommand{\lfoot}{\@ifnextchar[{\@xlfoot}{\@ylfoot}} +\def\@xlfoot[#1]#2{\fancy@def\f@ncyelf{#1}\fancy@def\f@ncyolf{#2}} +\def\@ylfoot#1{\fancy@def\f@ncyelf{#1}\fancy@def\f@ncyolf{#1}} + +\newcommand{\cfoot}{\@ifnextchar[{\@xcfoot}{\@ycfoot}} +\def\@xcfoot[#1]#2{\fancy@def\f@ncyecf{#1}\fancy@def\f@ncyocf{#2}} +\def\@ycfoot#1{\fancy@def\f@ncyecf{#1}\fancy@def\f@ncyocf{#1}} + +\newcommand{\rfoot}{\@ifnextchar[{\@xrfoot}{\@yrfoot}} +\def\@xrfoot[#1]#2{\fancy@def\f@ncyerf{#1}\fancy@def\f@ncyorf{#2}} +\def\@yrfoot#1{\fancy@def\f@ncyerf{#1}\fancy@def\f@ncyorf{#1}} + +\newlength{\fancy@headwidth} +\let\headwidth\fancy@headwidth +\newlength{\f@ncyO@elh} +\newlength{\f@ncyO@erh} +\newlength{\f@ncyO@olh} +\newlength{\f@ncyO@orh} +\newlength{\f@ncyO@elf} +\newlength{\f@ncyO@erf} +\newlength{\f@ncyO@olf} +\newlength{\f@ncyO@orf} +\newcommand{\headrulewidth}{0.4pt} +\newcommand{\footrulewidth}{0pt} +\newcommand{\footruleskip}{.3\normalbaselineskip} + +% Fancyplain stuff shouldn't be used anymore (rather +% \fancypagestyle{plain} should be used), but it must be present for +% compatibility reasons. + +\newcommand{\plainheadrulewidth}{0pt} +\newcommand{\plainfootrulewidth}{0pt} +\newif\if@fancyplain \@fancyplainfalse +\def\fancyplain#1#2{\if@fancyplain#1\else#2\fi} + +\headwidth=-123456789sp %magic constant + +% Command to reset various things in the headers: +% a.o. 
single spacing (taken from setspace.sty) +% and the catcode of ^^M (so that epsf files in the header work if a +% verbatim crosses a page boundary) +% It also defines a \nouppercase command that disables \uppercase and +% \Makeuppercase. It can only be used in the headers and footers. +\let\fnch@everypar\everypar% save real \everypar because of spanish.ldf +\def\fancy@reset{\fnch@everypar{}\restorecr\endlinechar=13 + \def\baselinestretch{1}% + \def\nouppercase##1{{\let\uppercase\relax\let\MakeUppercase\relax + \expandafter\let\csname MakeUppercase \endcsname\relax##1}}% + \ifx\undefined\@newbaseline% NFSS not present; 2.09 or 2e + \ifx\@normalsize\undefined \normalsize % for ucthesis.cls + \else \@normalsize \fi + \else% NFSS (2.09) present + \@newbaseline% + \fi} + +% Initialization of the head and foot text. + +% The default values still contain \fancyplain for compatibility. +\fancyhf{} % clear all +% lefthead empty on ``plain'' pages, \rightmark on even, \leftmark on odd pages +% evenhead empty on ``plain'' pages, \leftmark on even, \rightmark on odd pages +\if@twoside + \fancyhead[el,or]{\fancyplain{}{\sl\rightmark}} + \fancyhead[er,ol]{\fancyplain{}{\sl\leftmark}} +\else + \fancyhead[l]{\fancyplain{}{\sl\rightmark}} + \fancyhead[r]{\fancyplain{}{\sl\leftmark}} +\fi +\fancyfoot[c]{\rm\thepage} % page number + +% Use box 0 as a temp box and dimen 0 as temp dimen. +% This can be done, because this code will always +% be used inside another box, and therefore the changes are local. + +\def\@fancyvbox#1#2{\setbox0\vbox{#2}\ifdim\ht0>#1\@fancywarning + {\string#1 is too small (\the#1): ^^J Make it at least \the\ht0.^^J + We now make it that large for the rest of the document.^^J + This may cause the page layout to be inconsistent, however\@gobble}% + \dimen0=#1\global\setlength{#1}{\ht0}\ht0=\dimen0\fi + \box0} + +% Put together a header or footer given the left, center and +% right text, fillers at left and right and a rule. 
+% The \lap commands put the text into an hbox of zero size, +% so overlapping text does not generate an errormessage. +% These macros have 5 parameters: +% 1. LEFTSIDE BEARING % This determines at which side the header will stick +% out. When \fancyhfoffset is used this calculates \headwidth, otherwise +% it is \hss or \relax (after expansion). +% 2. \f@ncyolh, \f@ncyelh, \f@ncyolf or \f@ncyelf. This is the left component. +% 3. \f@ncyoch, \f@ncyech, \f@ncyocf or \f@ncyecf. This is the middle comp. +% 4. \f@ncyorh, \f@ncyerh, \f@ncyorf or \f@ncyerf. This is the right component. +% 5. RIGHTSIDE BEARING. This is always \relax or \hss (after expansion). + +\def\@fancyhead#1#2#3#4#5{#1\hbox to\headwidth{\fancy@reset + \@fancyvbox\headheight{\hbox + {\rlap{\parbox[b]{\headwidth}{\raggedright#2}}\hfill + \parbox[b]{\headwidth}{\centering#3}\hfill + \llap{\parbox[b]{\headwidth}{\raggedleft#4}}}\headrule}}#5} + +\def\@fancyfoot#1#2#3#4#5{#1\hbox to\headwidth{\fancy@reset + \@fancyvbox\footskip{\footrule + \hbox{\rlap{\parbox[t]{\headwidth}{\raggedright#2}}\hfill + \parbox[t]{\headwidth}{\centering#3}\hfill + \llap{\parbox[t]{\headwidth}{\raggedleft#4}}}}}#5} + +\def\headrule{{\if@fancyplain\let\headrulewidth\plainheadrulewidth\fi + \hrule\@height\headrulewidth\@width\headwidth \vskip-\headrulewidth}} + +\def\footrule{{\if@fancyplain\let\footrulewidth\plainfootrulewidth\fi + \vskip-\footruleskip\vskip-\footrulewidth + \hrule\@width\headwidth\@height\footrulewidth\vskip\footruleskip}} + +\def\ps@fancy{% +\@ifundefined{@chapapp}{\let\@chapapp\chaptername}{}%for amsbook +% +% Define \MakeUppercase for old LaTeXen. +% Note: we used \def rather than \let, so that \let\uppercase\relax (from +% the version 1 documentation) will still work. 
+% +\@ifundefined{MakeUppercase}{\def\MakeUppercase{\uppercase}}{}% +\@ifundefined{chapter}{\def\sectionmark##1{\markboth +{\MakeUppercase{\ifnum \c@secnumdepth>\z@ + \thesection\hskip 1em\relax \fi ##1}}{}}% +\def\subsectionmark##1{\markright {\ifnum \c@secnumdepth >\@ne + \thesubsection\hskip 1em\relax \fi ##1}}}% +{\def\chaptermark##1{\markboth {\MakeUppercase{\ifnum \c@secnumdepth>\m@ne + \@chapapp\ \thechapter. \ \fi ##1}}{}}% +\def\sectionmark##1{\markright{\MakeUppercase{\ifnum \c@secnumdepth >\z@ + \thesection. \ \fi ##1}}}}% +%\csname ps@headings\endcsname % use \ps@headings defaults if they exist +\ps@@fancy +\gdef\ps@fancy{\@fancyplainfalse\ps@@fancy}% +% Initialize \headwidth if the user didn't +% +\ifdim\headwidth<0sp +% +% This catches the case that \headwidth hasn't been initialized and the +% case that the user added something to \headwidth in the expectation that +% it was initialized to \textwidth. We compensate this now. This loses if +% the user intended to multiply it by a factor. But that case is more +% likely done by saying something like \headwidth=1.2\textwidth. +% The doc says you have to change \headwidth after the first call to +% \pagestyle{fancy}. This code is just to catch the most common cases were +% that requirement is violated. 
+% + \global\advance\headwidth123456789sp\global\advance\headwidth\textwidth +\fi} +\def\ps@fancyplain{\ps@fancy \let\ps@plain\ps@plain@fancy} +\def\ps@plain@fancy{\@fancyplaintrue\ps@@fancy} +\let\ps@@empty\ps@empty +\def\ps@@fancy{% +\ps@@empty % This is for amsbook/amsart, which do strange things with \topskip +\def\@mkboth{\protect\markboth}% +\def\@oddhead{\@fancyhead\fancy@Oolh\f@ncyolh\f@ncyoch\f@ncyorh\fancy@Oorh}% +\def\@oddfoot{\@fancyfoot\fancy@Oolf\f@ncyolf\f@ncyocf\f@ncyorf\fancy@Oorf}% +\def\@evenhead{\@fancyhead\fancy@Oelh\f@ncyelh\f@ncyech\f@ncyerh\fancy@Oerh}% +\def\@evenfoot{\@fancyfoot\fancy@Oelf\f@ncyelf\f@ncyecf\f@ncyerf\fancy@Oerf}% +} +% Default definitions for compatibility mode: +% These cause the header/footer to take the defined \headwidth as width +% And to shift in the direction of the marginpar area + +\def\fancy@Oolh{\if@reversemargin\hss\else\relax\fi} +\def\fancy@Oorh{\if@reversemargin\relax\else\hss\fi} +\let\fancy@Oelh\fancy@Oorh +\let\fancy@Oerh\fancy@Oolh + +\let\fancy@Oolf\fancy@Oolh +\let\fancy@Oorf\fancy@Oorh +\let\fancy@Oelf\fancy@Oelh +\let\fancy@Oerf\fancy@Oerh + +% New definitions for the use of \fancyhfoffset +% These calculate the \headwidth from \textwidth and the specified offsets. 
+ +\def\fancy@offsolh{\headwidth=\textwidth\advance\headwidth\f@ncyO@olh + \advance\headwidth\f@ncyO@orh\hskip-\f@ncyO@olh} +\def\fancy@offselh{\headwidth=\textwidth\advance\headwidth\f@ncyO@elh + \advance\headwidth\f@ncyO@erh\hskip-\f@ncyO@elh} + +\def\fancy@offsolf{\headwidth=\textwidth\advance\headwidth\f@ncyO@olf + \advance\headwidth\f@ncyO@orf\hskip-\f@ncyO@olf} +\def\fancy@offself{\headwidth=\textwidth\advance\headwidth\f@ncyO@elf + \advance\headwidth\f@ncyO@erf\hskip-\f@ncyO@elf} + +\def\fancy@setoffs{% +% Just in case \let\headwidth\textwidth was used + \fancy@gbl\let\headwidth\fancy@headwidth + \fancy@gbl\let\fancy@Oolh\fancy@offsolh + \fancy@gbl\let\fancy@Oelh\fancy@offselh + \fancy@gbl\let\fancy@Oorh\hss + \fancy@gbl\let\fancy@Oerh\hss + \fancy@gbl\let\fancy@Oolf\fancy@offsolf + \fancy@gbl\let\fancy@Oelf\fancy@offself + \fancy@gbl\let\fancy@Oorf\hss + \fancy@gbl\let\fancy@Oerf\hss} + +\newif\iffootnote +\let\latex@makecol\@makecol +\def\@makecol{\ifvoid\footins\footnotetrue\else\footnotefalse\fi +\let\topfloat\@toplist\let\botfloat\@botlist\latex@makecol} +\def\iftopfloat#1#2{\ifx\topfloat\empty #2\else #1\fi} +\def\ifbotfloat#1#2{\ifx\botfloat\empty #2\else #1\fi} +\def\iffloatpage#1#2{\if@fcolmade #1\else #2\fi} + +\newcommand{\fancypagestyle}[2]{% + \@namedef{ps@#1}{\let\fancy@gbl\relax#2\relax\ps@fancy}} diff --git a/report/additional-latex-files/natbib.sty b/report/additional-latex-files/natbib.sty new file mode 100644 index 00000000..ff0d0b91 --- /dev/null +++ b/report/additional-latex-files/natbib.sty @@ -0,0 +1,1246 @@ +%% +%% This is file `natbib.sty', +%% generated with the docstrip utility. 
+%% +%% The original source files were: +%% +%% natbib.dtx (with options: `package,all') +%% ============================================= +%% IMPORTANT NOTICE: +%% +%% This program can be redistributed and/or modified under the terms +%% of the LaTeX Project Public License Distributed from CTAN +%% archives in directory macros/latex/base/lppl.txt; either +%% version 1 of the License, or any later version. +%% +%% This is a generated file. +%% It may not be distributed without the original source file natbib.dtx. +%% +%% Full documentation can be obtained by LaTeXing that original file. +%% Only a few abbreviated comments remain here to describe the usage. +%% ============================================= +%% Copyright 1993-2009 Patrick W Daly +%% Max-Planck-Institut f\"ur Sonnensystemforschung +%% Max-Planck-Str. 2 +%% D-37191 Katlenburg-Lindau +%% Germany +%% E-mail: daly@mps.mpg.de +\NeedsTeXFormat{LaTeX2e}[1995/06/01] +\ProvidesPackage{natbib} + [2009/07/16 8.31 (PWD, AO)] + + % This package reimplements the LaTeX \cite command to be used for various + % citation styles, both author-year and numerical. It accepts BibTeX + % output intended for many other packages, and therefore acts as a + % general, all-purpose citation-style interface. + % + % With standard numerical .bst files, only numerical citations are + % possible. With an author-year .bst file, both numerical and + % author-year citations are possible. + % + % If author-year citations are selected, \bibitem must have one of the + % following forms: + % \bibitem[Jones et al.(1990)]{key}... + % \bibitem[Jones et al.(1990)Jones, Baker, and Williams]{key}... + % \bibitem[Jones et al., 1990]{key}... + % \bibitem[\protect\citeauthoryear{Jones, Baker, and Williams}{Jones + % et al.}{1990}]{key}... + % \bibitem[\protect\citeauthoryear{Jones et al.}{1990}]{key}... + % \bibitem[\protect\astroncite{Jones et al.}{1990}]{key}... + % \bibitem[\protect\citename{Jones et al., }1990]{key}... 
+ % \harvarditem[Jones et al.]{Jones, Baker, and Williams}{1990}{key}... + % + % This is either to be made up manually, or to be generated by an + % appropriate .bst file with BibTeX. + % Author-year mode || Numerical mode + % Then, \citet{key} ==>> Jones et al. (1990) || Jones et al. [21] + % \citep{key} ==>> (Jones et al., 1990) || [21] + % Multiple citations as normal: + % \citep{key1,key2} ==>> (Jones et al., 1990; Smith, 1989) || [21,24] + % or (Jones et al., 1990, 1991) || [21,24] + % or (Jones et al., 1990a,b) || [21,24] + % \cite{key} is the equivalent of \citet{key} in author-year mode + % and of \citep{key} in numerical mode + % Full author lists may be forced with \citet* or \citep*, e.g. + % \citep*{key} ==>> (Jones, Baker, and Williams, 1990) + % Optional notes as: + % \citep[chap. 2]{key} ==>> (Jones et al., 1990, chap. 2) + % \citep[e.g.,][]{key} ==>> (e.g., Jones et al., 1990) + % \citep[see][pg. 34]{key}==>> (see Jones et al., 1990, pg. 34) + % (Note: in standard LaTeX, only one note is allowed, after the ref. + % Here, one note is like the standard, two make pre- and post-notes.) + % \citealt{key} ==>> Jones et al. 1990 + % \citealt*{key} ==>> Jones, Baker, and Williams 1990 + % \citealp{key} ==>> Jones et al., 1990 + % \citealp*{key} ==>> Jones, Baker, and Williams, 1990 + % Additional citation possibilities (both author-year and numerical modes) + % \citeauthor{key} ==>> Jones et al. + % \citeauthor*{key} ==>> Jones, Baker, and Williams + % \citeyear{key} ==>> 1990 + % \citeyearpar{key} ==>> (1990) + % \citetext{priv. comm.} ==>> (priv. comm.) + % \citenum{key} ==>> 11 [non-superscripted] + % Note: full author lists depends on whether the bib style supports them; + % if not, the abbreviated list is printed even when full requested. 
+ %
+ % For names like della Robbia at the start of a sentence, use
+ %   \Citet{dRob98}    ==>> Della Robbia (1998)
+ %   \Citep{dRob98}    ==>> (Della Robbia, 1998)
+ %   \Citeauthor{dRob98} ==>> Della Robbia
+ %
+ %
+ % Citation aliasing is achieved with
+ %   \defcitealias{key}{text}
+ %   \citetalias{key}  ==>> text
+ %   \citepalias{key}  ==>> (text)
+ %
+ % Defining the citation mode and punctuation (citation style)
+ % \setcitestyle{}
+ % Example: \setcitestyle{square,semicolon}
+ % Alternatively:
+ % Use \bibpunct with 6 mandatory arguments:
+ %   1. opening bracket for citation
+ %   2. closing bracket
+ %   3. citation separator (for multiple citations in one \cite)
+ %   4. the letter n for numerical styles, s for superscripts
+ %      else anything for author-year
+ %   5. punctuation between authors and date
+ %   6. punctuation between years (or numbers) when common authors missing
+ % One optional argument is the character coming before post-notes. It
+ % appears in square braces before all other arguments. May be left off.
+ % Example (and default) \bibpunct[, ]{(}{)}{;}{a}{,}{,}
+ %
+ % To make this automatic for a given bib style, named newbib, say, make
+ % a local configuration file, natbib.cfg, with the definition
+ %   \newcommand{\bibstyle@newbib}{\bibpunct...}
+ % Then the \bibliographystyle{newbib} will cause \bibstyle@newbib to
+ % be called on THE NEXT LATEX RUN (via the aux file).
+ %
+ % Such preprogrammed definitions may be invoked anywhere in the text
+ % by calling \citestyle{newbib}. This is only useful if the style specified
+ % differs from that in \bibliographystyle.
+ %
+ % With \citeindextrue and \citeindexfalse, one can control whether the
+ % \cite commands make an automatic entry of the citation in the .idx
+ % indexing file. For this, \makeindex must also be given in the preamble.
+ % + % Package Options: (for selecting punctuation) + % round - round parentheses are used (default) + % square - square brackets are used [option] + % curly - curly braces are used {option} + % angle - angle brackets are used